query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: sequence (lengths 4 to 101)
negative_scores: sequence (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 classes)
Returns the number of milliseconds for the given number of jiffies (a weird timing unit used by the kernel).
def calculate_time_ms(self, jiffies):
    return int((jiffies * 1000.0) / self._jiffies_per_sec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __calculate_time_cs(self, jiffies):\n\n return int((jiffies * 100.0) / self._jiffies_per_sec)", "def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))", "def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))", "def millis() -> int:", "def millis():\n return int(round(time() * 1000))", "def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3", "def time_millis():\n\n return int(time.time() * 1000)", "def get_time_ms():\n return int(round(time.time() * 1000))", "def time_ms():\n return int(1000 * time.time())", "def get_millis(seconds):\n return seconds * 10 ** 3", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def millisecond():\n return int(round(time.time() * 1000))", "def _nowms():\n return int(time.time() * 1000)", "def elapsed_micros(start: int, /) -> int:", "def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)", "def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000", "def curr_time_millis():\n return 1000 * timeit.default_timer()", "def _get_milleseconds(self):\n return int(round(time.time() * 1000))", "def millis(self):\n return self._micros // 1000", "def unix_time_millisecond(date):\r\n return unix_time(date, float=True) * 1e3", "def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms", "def __micros():\n return round(time.time() * 1000000)", "def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000", "def elapsed_millis(start: int, /) -> int:", "def timestamp(millis=False):\n return int(round(time.time() * (millis and 1000 or 1)))", "def transmission_time_us(self, num_bytes):\n bits_to_transmit = num_bytes * 8\n transmission_time_us = (bits_to_transmit / self.megabits_per_second)\n return transmission_time_us", "def getTime():\n\n return float(time.perf_counter()*1000)", "def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)", "def np_dt_epoch_msec(value):\n return value.astype(long) / 1000", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def ml(milliliters):\n return ul(milliliters*1000)", "def as_millis(self):\n return int(ntplib.ntp_to_system_time(self.start) * 1000), int(ntplib.ntp_to_system_time(self.stop) * 1000)", "def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute", "def 
poll_interval_in_milliseconds(self):\n\n return self._poll_interval_in_milliseconds", "def units_to_msec(units, resolution):\n time_ms = units * float(resolution) / 1000\n return time_ms", "def EpochNano():\n return int(time.time() * 1000000000)", "def _STEPS2TIME(step):\n return step/1000.", "def __current_milli_time(self):\n\n return int(round(time.time() * 1000))", "def getUnixTime(pool=\"time.apple.com\"):\n time_offset = ntplib.NTPClient().request(pool).offset\n return float(time.time()+time_offset)", "def tick(self):\n prev_last_tick = self.last_tick_\n self.last_tick_ = timeit.default_timer()\n latest_tick_period = self.last_tick_ - prev_last_tick\n return latest_tick_period", "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def __get_uptime_ms(self):\n\n if self._boot_time_ms is None:\n # We read /proc/uptime once to get the current boot time.\n uptime_file = None\n try:\n uptime_file = open(\"/proc/uptime\", \"r\")\n # The first number in the file is the number of seconds since\n # boot time. So, we just use that to calculate the milliseconds\n # past epoch.\n self._boot_time_ms = int(time.time()) * 1000 - int(\n float(uptime_file.readline().split()[0]) * 1000.0\n )\n finally:\n if uptime_file is not None:\n uptime_file.close()\n\n # Calculate the uptime by just taking current time and subtracting out\n # the boot time.\n return int(time.time()) * 1000 - self._boot_time_ms", "def current_milli_time(self):\n return int(round(time.time() * 1000))", "def sequenceTime_sec(cmds):\n cycles = sum(MemorySequence.cmdTime_cycles(c) for c in cmds)\n return cycles * 40e-9 # assume 25 MHz clock -> 40 ns per cycle", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def unix_time_nanos(dt):\n return timedelta_to_micros(dt - epoch)", "def current_time_millis():\n return int(round(time.time() * 1000))", "def now():\n\n return rospy.Time.now().to_nsec()", "def current_millis():\n return int(round(time.time() * 1000))", "def unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def freq_minutes(self):\n return 5", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def frames_to_ms(num_frames: int) -> int:\n return int(16.67 * num_frames)", "def measure_time(n: int, max_delay: int) -> float:\n start_time = time.time()\n asyncio.run(wait_n(n, max_delay))\n return (time.time() - start_time) / n", "def frames_to_msec(frames, fps=FPS):\n return 1000.0 * frames / fps", "def GetMonotime():\n return float(open(PROC_UPTIME).read().split()[0])", "def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r", "def event_time_to_microseconds(interp, ev_time):\n secs_to_usecs = 1000 * 1000\n return r_int64(ev_time * 1000 + interp.startup_time * secs_to_usecs) + \\\n constants.SQUEAK_EPOCH_DELTA_MICROSECONDS", "def milliseconds_offset(cls, timestamp, now=None):\n if isinstance(timestamp, (int, float)):\n base = timestamp\n else:\n base = cls.to_unix(timestamp)\n if now is None:\n now = time.time()\n return (now - base) * 1000", "def get_current_unix_timestamp_ms():\r\n return int(datetime.timestamp(datetime.now())) * 1000", "def num_microseconds(self, td):\n return float(td.microseconds + 1000000 * (td.seconds + 86400 * td.days))", "def measure_time(n: int, max_delay: int) -> float:\n t0 = time.time()\n asyncio.run(wait_n(n, max_delay))\n t1 = time.time()\n total_time = t1 - t0\n return total_time / n", "def 
time_in_millis(my_time=None):\n\n if my_time:\n t = my_time\n else:\n t = gmtime()\n\n return timegm(t)", "def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):\n return elapsed_bake_time + (number_of_layers * 2)", "def timeTime(self):\n return self._micros / 1000000.0", "def unixTimeMs(dateAndTime):\n dateAndTime = dateAndTime + datetime.timedelta(hours=HOUR_ADJUSTMENT)\n return int((dateAndTime - EPOCH).total_seconds() * 1000.0)", "def ticks_per_second(self):\n return self._ticks_per_second", "def get_ulid_timestamp(ulid):\n ts_bytes = ulid_to_binary(ulid)[:6]\n ts_bytes = b'\\0\\0' + ts_bytes\n assert len(ts_bytes) == 8\n return (struct.unpack(b'!Q', ts_bytes)[0] / 1000.)", "def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)", "def microseconds_since_epoch(date_time, epoch=None):\n if not epoch:\n epoch = datetime.datetime.utcfromtimestamp(0)\n\n delta = date_time - epoch\n\n # 86400 is 24 * 60 * 60 e.g. total seconds in a day\n return delta.microseconds + (delta.seconds + delta.days * 86400) * 10**6", "def sys_up_time():\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n return int(uptime_seconds)", "def _TIME2STEPS(time):\n return int(time*1000)", "def _get_cpu_interval(self):\n self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])\n\n if 5 <= self._polling_execute_frequency < 60:\n return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev\n elif 60 <= self._polling_execute_frequency < 300:\n return cpmCPUTotal1minRev\n elif 300 <= self._polling_execute_frequency:\n return cpmCPUTotal5minRev\n else:\n return cpmCPUTotal1minRev", "def monotonic():\n # Assumes that monotonic is called more frequently than the wraparound of micropython's\n # utime.ticks_ms()\n global _prev_ticks_ms, _total_ms # pylint: disable=global-statement\n ticks_ms = utime.ticks_ms()\n _total_ms += utime.ticks_diff(ticks_ms, _prev_ticks_ms)\n _prev_ticks_ms = ticks_ms\n return _total_ms * 0.001", "def boot_time():\n # This dirty hack is to adjust the precision of the returned\n # value which may have a 1 second fluctuation, see:\n # https://github.com/giampaolo/psutil/issues/1007\n global _last_btime\n ret = float(cext.boot_time())\n if abs(ret - _last_btime) <= 1:\n return _last_btime\n else:\n _last_btime = ret\n return ret", "def cpu_time(self):", "def ClockUsToTimestamp(clock_us, reference_clock_us, reference_timestamp):\n\n return reference_timestamp + (clock_us - reference_clock_us) / 1.0e6", "def msec_to_units(time_ms, resolution):\n units = time_ms * 1000 / resolution\n return int(units)", "def MillisToSec(self):\n self.Millis = [item / 1000 for item in self.Millis]\n return self.Millis", "def gettime():\n return libruss.russ_gettime()", "def get_timebase(self,dt):\r\n\r\n if dt < 1E-9:\r\n dt = 1E-9\r\n\r\n if dt > 4E-9:\r\n n = int(dt*125E6 + 2)\r\n else:\r\n dt *= 1E9\r\n n = round(log(dt,2))\r\n return n", "def us(self):\n return 1000 * 1000 * self.read()", "def mel2hz(mel):\n\treturn 700 * (10 ** (mel / 2595.0) - 1)", "def get_timestamp() -> int:\n\n return int(time.time() * 1000)", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):\n return float(pcr_t2-pcr_t1)/90000.0 + offset", "def get_timestamp():\n\n # Convert timestamp to int after multiply by 1000 to get millisecond timestamp in int.\n return int(time.time() * 1000)", "def now_timestamp(unit: TimeUnit = TimeUnit.SECONDS) -> float:\n return 
TimeHelper.to_timestamp(TimeHelper.now(), unit)", "def time(self) -> int:\n return int(round(time.time() * 1000))", "def currentTimeSecs():\n return time.time()", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps" ]
[ "0.73772615", "0.7025039", "0.6870207", "0.6851476", "0.6639381", "0.6579558", "0.6533067", "0.63947713", "0.6242466", "0.61780435", "0.6164305", "0.6149434", "0.61466753", "0.6125577", "0.61053866", "0.6076043", "0.6049589", "0.60407495", "0.60054356", "0.59866726", "0.5976186", "0.59511286", "0.59292465", "0.59017617", "0.58917755", "0.58187175", "0.58177817", "0.5814596", "0.57947546", "0.5781713", "0.57773024", "0.57605296", "0.5728779", "0.57079", "0.57079", "0.57079", "0.57079", "0.57079", "0.57079", "0.56879365", "0.56497854", "0.5635667", "0.56270796", "0.5604115", "0.55850327", "0.5555563", "0.55502933", "0.5530579", "0.5527269", "0.5523488", "0.55193186", "0.5517147", "0.5513014", "0.5498009", "0.54976314", "0.54956627", "0.54817665", "0.54776365", "0.54618466", "0.54616773", "0.5456723", "0.5456723", "0.5445389", "0.5435606", "0.542807", "0.54195976", "0.54078937", "0.54012966", "0.5397984", "0.5395873", "0.5364622", "0.53457844", "0.53266406", "0.53256655", "0.5307408", "0.53072035", "0.52931744", "0.52860993", "0.52659935", "0.52652526", "0.5261714", "0.5254328", "0.5251509", "0.52494556", "0.5236165", "0.522545", "0.5208776", "0.5186835", "0.518421", "0.518144", "0.517337", "0.51703453", "0.51607114", "0.5159445", "0.51528627", "0.51460665", "0.51452327", "0.51417935", "0.5137073", "0.51341444" ]
0.82893556
0
Returns the number of milliseconds the system has been up.
def __get_uptime_ms(self):
    if self._boot_time_ms is None:
        # We read /proc/uptime once to get the current boot time.
        uptime_file = None
        try:
            uptime_file = open("/proc/uptime", "r")
            # The first number in the file is the number of seconds since
            # boot time. So, we just use that to calculate the milliseconds
            # past epoch.
            self._boot_time_ms = int(time.time()) * 1000 - int(
                float(uptime_file.readline().split()[0]) * 1000.0
            )
        finally:
            if uptime_file is not None:
                uptime_file.close()

    # Calculate the uptime by just taking current time and subtracting out
    # the boot time.
    return int(time.time()) * 1000 - self._boot_time_ms
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sys_up_time():\n\n with open('/proc/uptime', 'r') as f:\n uptime_seconds = float(f.readline().split()[0])\n return int(uptime_seconds)", "def getUpTime(self):\n return self.__upTime + time() - self.__fingerTime", "def uptime(self):\n return self._call_txtrader_api('uptime', {})", "def get_server_uptime(self):\n return time.time() - self.init_time", "def uptime(self):\n return self._uptime", "def LingerTime(self) -> int:", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def GetMonotime():\n return float(open(PROC_UPTIME).read().split()[0])", "def get_uptime(self):\n self.__not_implemented()", "def get_os_uptime(self):\n\t\treturn call_sdk_function('PrlStat_GetOsUptime', self.handle)", "def getIdleTime(self):\n return self.__idleTime + time() - self.__fingerTime", "def get_uptime():\n output = Popen('cat /proc/uptime', shell=True, stdout=PIPE)\n return float(output.communicate()[0].split()[0])", "async def uptime(self, ctx):\r\n uptime_seconds = round(\r\n (datetime.now() - self.start_time).total_seconds())\r\n await ctx.send(f\"Current Uptime: {util.format_seconds(uptime_seconds)}\"\r\n )", "def systime():\n return resource.getrusage(resource.RUSAGE_SELF).ru_stime", "def idle(self):\n return (datetime.datetime.now() - self._last_received).total_seconds()", "def usertime():\n return resource.getrusage(resource.RUSAGE_SELF).ru_utime", "def millis() -> int:", "def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def startuptime(self):\n # type: () -> int\n return self._startuptime", "def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()", "def _nowms():\n return int(time.time() * 1000)", "def startup_time_delta(self):\n return int((time.time() - self.startup_timestamp) * 1000.0)", "def remaining_ms():", "def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))", "def player_startuptime(self):\n # type: () -> int\n return self._player_startuptime", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def heartbeat_time(self):\n if self._lasthb is not None:\n return (now() - self._lasthb).seconds\n return 0.0", "def duration(self):\n if self._connected:\n return (datetime.datetime.now() - self._connected).total_seconds()\n return float('inf')", "def time_passed(self):\n return (datetime.now(timezone.utc) - self._time_run).total_seconds()", "def getTime():\n\n return float(time.perf_counter()*1000)", "def uptime(self):\n if self._uptime is None:\n version_data = self._raw_version_data()\n uptime_full_string = version_data[\"uptime\"]\n self._uptime = self._uptime_to_seconds(uptime_full_string)\n\n return self._uptime", "def time_left(self):\n return self.timeout - self.current_milli_time()", "def time_millis():\n\n return int(time.time() * 1000)", "def duration(self):\n return (datetime.datetime.now() - self._when_connected).total_seconds()", "def idle(self) -> float:\n return time.time() - self.last_input_time", "def millis():\n return int(round(time() * 1000))", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def getTime():\n return float(time.perf_counter()*1000)", "def 
time(self) -> int:\n return int(round(time.time() * 1000))", "def get_time_ms():\n return int(round(time.time() * 1000))", "def read_system_uptime():\n if os.path.exists(_PROC_UPTIME):\n with open(_PROC_UPTIME) as proc_uptime:\n uptime_fields = proc_uptime.read().split()\n return float(uptime_fields[0])\n collectd.error('read_system_uptime: %s does not exist.' % _PROC_UPTIME)\n return 0", "def do_uptime(self, message):\r\n\t\tup_time = time.time() - self.start_time\r\n\t\tstart_time = datetime.datetime.fromtimestamp(self.start_time)\r\n\t\tself.trace(f'Uptime: {duration(up_time)}; running since {start_time}')", "def get_time_before_wifi_sleep(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def time(self):\n return self._clock() - self._starttime", "def time_ms():\n return int(1000 * time.time())", "def curr_time_millis():\n return 1000 * timeit.default_timer()", "def time(self) -> float:\n return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)", "def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)", "def time(self) -> int:\n return store.app.time", "def currentTimeSecs():\n return time.time()", "def uptime(self) -> timedelta:\n return timedelta(seconds=int(time() - self.start_time))", "def service_time(self):\r\n return (self.completion_time - self.node_monitor_launch_time)", "def current_time_millis():\n return int(round(time.time() * 1000))", "def time(self):\n return pygame.time.get_ticks() - self.start_time", "def time(self) -> int:\n pass", "def proctime():\n r = resource.getrusage(resource.RUSAGE_SELF)\n return r.ru_utime+r.ru_stime", "def __current_milli_time(self):\n\n return int(round(time.time() * 1000))", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time", "def duration(self):\n return time.time() - self.socket_opened", "def boot_time():\n # This dirty hack is to adjust the precision of the returned\n # value which may have a 1 second fluctuation, see:\n # https://github.com/giampaolo/psutil/issues/1007\n global _last_btime\n ret = float(cext.boot_time())\n if abs(ret - _last_btime) <= 1:\n return _last_btime\n else:\n _last_btime = ret\n return ret", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)", "def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0", "def current_millis():\n return int(round(time.time() * 1000))", "def video_startuptime(self):\n # type: () -> int\n return self._video_startuptime", "def time_since_last_state_change(self):\n current_time = rospy.get_rostime()\n difference = current_time - self._timestamps['last_state_change']\n return difference.to_sec()", "def current_milli_time(self):\n return int(round(time.time() * 1000))", "def millis(self):\n return self._micros // 1000", "def __last_time(self):\n if self.__stopped is not None:\n return self.__stopped\n return self.__time()", "def now():\n\treturn time.time() * 1000", "def sleep_time(self):\n now = datetime.utcnow()\n return min(service.next_update_in(now) for service in self.services)", "def now():\n if os.sys.platform == 'win32':\n return time.clock() # best for windows? 
seems to give finer temporal resolution.\n else:\n return time.time() # best for Unix, others???", "def time_remaining(self) -> float:\n\n return self.event.time - time.time()", "def duration( self ):\n return (self.start and time.process_time()-self.start) or 0", "def time_left(self) -> float:\n return self._alarm_silence - time.monotonic()", "async def uptime(self, ctx):\n await ctx.send(f'Online since {self.bot.uptime.strftime(\"%m/%d/%Y %H:%M UTC\")} '\n f'(~{timeago.format(self.bot.uptime, datetime.utcnow())})')", "def _time(self):\n return time()", "def secondsPassed(self)->int:\n return 0 if not self.used else int((datetime.utcnow() - self.firstAccessDate).total_seconds())", "def last_count_update_time(self):\n return self.__last_count_update_time", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def get_system_time(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetSystemTime', self.handle)", "def now():\n\n return rospy.Time.now().to_nsec()", "def uptime():\n run('uptime')", "def uptime(self, mess, args):\n return '%s\\n' % getUptime()", "def sys_time(self):\n timestamp = None\n for i in range(10):\n while timestamp is None:\n timestamp = self.acquire_system_time()\n break\n return timestamp", "def elapsed(self):\n return self.__last_time() - self.__start", "def get_time_since_last_seen(self):\n return time.time() - self.time_last_seen", "def exptime(self):\n exptime = float(self.get('TRUITIME')) * int(self.get('COADDONE'))\n return exptime", "def seconds_from_last_update(self):\n return (datetime.utcnow() - self.last_update_datetime).total_seconds()", "def cpu_time(self):", "def getTime(self):\n return self.console.time()", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def elapsed(self):\n done, data1 = self._request('GS')\n if done:\n if data1[0] != '3':\n raise NotCharging\n done, data2 = self._request('GU')\n if done:\n return {\n 'seconds': int(data1[1]),\n 'Wh': float(data2[0])/3600\n }\n raise EvseError", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def get_time(self):\n\t\treturn time.time()", "def secondsPassed(self)->int:\n return self._lic.params['sessionTimeUsed'].value" ]
[ "0.76271576", "0.74804974", "0.7351499", "0.7336892", "0.7335807", "0.7160131", "0.6963318", "0.69100386", "0.68876153", "0.6848875", "0.67855483", "0.67283034", "0.67062175", "0.6680035", "0.6660014", "0.66218805", "0.66216844", "0.6590366", "0.6590068", "0.6582871", "0.65694773", "0.65612644", "0.65596247", "0.6553396", "0.6550697", "0.6547063", "0.651967", "0.65087134", "0.648552", "0.64714414", "0.64706546", "0.64691675", "0.6458358", "0.6449079", "0.6447029", "0.6437657", "0.6435378", "0.6435378", "0.6435378", "0.6435378", "0.6435378", "0.6435378", "0.6424437", "0.6424126", "0.641885", "0.6416519", "0.64120686", "0.6411066", "0.64045525", "0.64042926", "0.6396094", "0.6378269", "0.63769794", "0.636576", "0.6348658", "0.63435817", "0.6337085", "0.6333727", "0.63076603", "0.6302311", "0.6296694", "0.6283303", "0.6280029", "0.6279905", "0.6267585", "0.62458056", "0.6244931", "0.62120116", "0.6209983", "0.6203991", "0.62022126", "0.6188171", "0.61819476", "0.6180704", "0.61806893", "0.6178287", "0.6176645", "0.61532104", "0.6152288", "0.61516434", "0.6138835", "0.6131436", "0.6122088", "0.6121494", "0.61204046", "0.6113517", "0.6103391", "0.6098941", "0.60889935", "0.608695", "0.6078164", "0.60751116", "0.6068033", "0.6066745", "0.60629064", "0.6060663", "0.60584325", "0.6053902", "0.6052905", "0.60502046" ]
0.74130756
2
Gathers the metrics from the stat file.
def gather_sample(self, stat_file, collector=None):
    if not collector:
        collector = {}

    # The file format is just a single line of all the fields.
    line = stat_file.readlines()[0]

    # Chop off first part which is the pid and executable file. The
    # executable file is terminated with a paren so just search for that.
    line = line[(line.find(") ") + 2):]
    fields = line.split()

    # Then the fields we want are just at fixed field positions in the
    # string. Just grab them.
    # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers
    # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to
    # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number
    # 19, but in our case it's 16 aka 19 - 3)
    process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(
        int(fields[19])
    )
    collector.update(
        {
            Metric("app.cpu", "user"): self.__calculate_time_cs(int(fields[11])),
            Metric("app.cpu", "system"): self.__calculate_time_cs(int(fields[12])),
            Metric("app.uptime", None): process_uptime,
            Metric("app.nice", None): float(fields[16]),
            Metric("app.threads", None): int(fields[17]),
            Metric("app.mem.majflt", None): int(fields[9]),
            Metric("app.io.wait", None): int(fields[39]) if len(fields) >= 39 else 0,
        }
    )
    return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. 
Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n 
generation, stat[\"collections\"]\n )", "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def file_stat(self, file_path):", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. 
Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def read_metrics(self):\n raise NotImplementedError()", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def stats(self):\n pass", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def stats(self):", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def stat(**kwargs):\n print(\"output stats\")", "def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def stat_file(self, path, info):\n return {}", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n 
self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def stat (self, path):\r\n pass", "def usagestats_parse(dirpath):\r\n # Create database\r\n # TODO: change to an easier format, probably json.\r\n db, cursor = create_table()\r\n\r\n # Some vars for logging\r\n processed = 0\r\n err = 0\r\n\r\n # Iterate through the /usagestats/ directory and fetch all files\r\n for root, dirnames, filenames in os.walk(dirpath, topdown=True, onerror=None, followlinks=False):\r\n if 'daily' in root or 'weekly' in root or 'monthly' in root or 'yearly' in root:\r\n # Retrieve the folder name to save what the frequency of the usagestats were:\r\n frequency = root.split('/')[-1]\r\n for filename in filenames:\r\n # Check if filename is only numbers (which is an epoch time representation)\r\n if filename.isnumeric():\r\n try:\r\n tree = ET.parse(os.path.join(root, filename))\r\n except ET.ParseError:\r\n parse_file_with_protobuf(os.path.join(root, filename), db)\r\n continue\r\n\r\n # We have sucessfully parsed the usagestats xml.\r\n # So continue processing\r\n tree_root = tree.getroot()\r\n\r\n for elem in tree_root:\r\n parse_sub_elements(frequency, elem, filename, db)\r\n\r\n # query for reporting\r\n cursor.execute('''\r\n select \r\n usage_type,\r\n datetime(lastime/1000, 'UNIXEPOCH', 'localtime') as lasttimeactive,\r\n timeactive as time_Active_in_msecs,\r\n timeactive/1000 as timeactive_in_secs,\r\n case last_time_service_used WHEN '' THEN ''\r\n ELSE datetime(last_time_service_used/1000, 'UNIXEPOCH', 'localtime')\r\n end last_time_service_used,\r\n case last_time_visible WHEN '' THEN ''\r\n ELSE datetime(last_time_visible/1000, 'UNIXEPOCH', 'localtime') \r\n end last_time_visible,\r\n total_time_visible,\r\n app_launch_count,\r\n package,\r\n CASE types\r\n WHEN '1' THEN 'MOVE_TO_FOREGROUND'\r\n WHEN '2' THEN 'MOVE_TO_BACKGROUND'\r\n WHEN '5' THEN 'CONFIGURATION_CHANGE'\r\n WHEN '7' THEN 'USER_INTERACTION'\r\n WHEN '8' THEN 'SHORTCUT_INVOCATION'\r\n ELSE types\r\n END types,\r\n classs,\r\n source,\r\n fullatt\r\n from data\r\n order by lasttimeactive DESC\r\n ''')\r\n all_rows = cursor.fetchall()\r\n\r\n # HTML report section\r\n h = open('./Report.html', 'w')\r\n h.write('<html><body>')\r\n h.write('<h2>Android Usagestats report (Dates are localtime!)</h2>')\r\n h.write('<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>')\r\n h.write('<br />')\r\n\r\n # HTML headers\r\n h.write('<table>')\r\n h.write('<tr>')\r\n h.write('<th>Usage Type</th>')\r\n h.write('<th>Last Time Active</th>')\r\n h.write('<th>Time Active in Msecs</th>')\r\n h.write('<th>Time Active in Secs</th>')\r\n h.write('<th>Last Time Service Used</th>')\r\n h.write('<th>Last Time Visible</th>')\r\n h.write('<th>Total Time Visible</th>')\r\n h.write('<th>App Launch Count</th>')\r\n h.write('<th>Package</th>')\r\n h.write('<th>Types</th>')\r\n h.write('<th>Class</th>')\r\n h.write('<th>Source</th>')\r\n h.write('</tr>')\r\n\r\n for row in all_rows:\r\n usage_type = row[0]\r\n lasttimeactive = row[1]\r\n time_Active_in_msecs = row[2]\r\n timeactive_in_secs = row[3]\r\n last_time_service_used = row[4]\r\n last_time_visible = row[5]\r\n total_time_visible = row[6]\r\n app_launch_count = row[7]\r\n package = row[8]\r\n types = row[9]\r\n classs = row[10]\r\n source = row[11]\r\n\r\n processed = processed + 1\r\n # report data\r\n h.write('<tr>')\r\n h.write('<td>' + str(usage_type) + '</td>')\r\n h.write('<td>' + str(lasttimeactive) 
+ '</td>')\r\n h.write('<td>' + str(time_Active_in_msecs) + '</td>')\r\n h.write('<td>' + str(timeactive_in_secs) + '</td>')\r\n h.write('<td>' + str(last_time_service_used) + '</td>')\r\n h.write('<td>' + str(last_time_visible) + '</td>')\r\n h.write('<td>' + str(total_time_visible) + '</td>')\r\n h.write('<td>' + str(app_launch_count) + '</td>')\r\n h.write('<td>' + str(package) + '</td>')\r\n h.write('<td>' + str(types) + '</td>')\r\n h.write('<td>' + str(classs) + '</td>')\r\n h.write('<td>' + str(source) + '</td>')\r\n h.write('</tr>')\r\n\r\n # HTML footer\r\n h.write('<table>')\r\n h.write('<br />')\r\n\r\n print('')\r\n print('Records processed: ' + str(processed))\r\n print('Triage report completed. See Reports.html.')", "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "def compute_metrics(self):\n pass", "def sstat(self):\n coh = self.cohorts[0]\n nsample = count_lines(wtccc2_sample_file(coh, opts.platform)) - 2 \n nfac = count_lines(opts.factor_file)\n if nsample != nfac:\n raise Exception('Number of individuals in sample file (%d) does not match number if factor file (%d)' % (\n (nsample, nfac)))\n for chrom in opts.chroms:\n system('gunzip -c %s | sstat -n %d -p -f %s > %s-%02d.sstat' % (\n gen_gz_file(coh, chrom, opts.platform), nsample, opts.factor_file, coh, chrom),\n verbose=True)", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. 
kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def _print_stat(self):\n if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:\n if self._file_paths:\n self._log_file_processing_stats(self._file_paths)\n self.last_stat_print_time = time.monotonic()", "def compute_statistics(self):", "def do_stat (self, line) :\n\t\tf = line.strip()\n\n\t\tif not f :\n\t\t\ttarget = self.__wd\n\n\t\telse :\n\t\t\tif self.exists(f) :\n\t\t\t\ttarget = self.__wd['content'][f]\n\t\t\telse :\n\t\t\t\treturn\n\n\t\tfor k in target.keys() :\n\t\t\tif k != 'content' :\n\t\t\t\tprint \"\t%s : %s\" % ( k, target[k] )", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def get_self_stats(stat_path):\n index_utime = 13\n index_stime = 14\n index_cutime = 15\n index_cstime = 16\n index_vsize = 22\n index_rss = 23\n self_stats = {'utime': 0, 'stime': 0, 'vsize': 0, 'rss': 0}\n\n if not os.path.exists(stat_path):\n collectd.error('mlab: get_self_stats stat path does not exist: %s' %\n stat_path)\n return {}\n\n with open(stat_path, 'r') as stat_file:\n stat_fields = stat_file.read().strip().split()\n\n if len(stat_fields) < 24:\n collectd.error('mlab: get_self_stats found only %s fields.' 
%\n len(stat_fields))\n return {}\n\n self_stats['utime'] = (\n float(stat_fields[index_utime]) + float(stat_fields[index_cutime]))\n self_stats['stime'] = (\n float(stat_fields[index_stime]) + float(stat_fields[index_cstime]))\n self_stats['vsize'] = int(stat_fields[index_vsize])\n self_stats['rss'] = int(stat_fields[index_rss]) * _PAGESIZE\n return self_stats", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def stat(self, handle):\n raise NotImplementedError", "def collect_statistics(self, stat_col, data_streams):\n self.module.collect_statistics(stat_col, data_streams)", "def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError", "def _read_stats(self, name):\n if os.name == 'nt':\n name = asunicode(name)\n stats = os.stat(name)\n mode = oct(stats.st_mode)[-4:]\n size = stats.st_size\n atime = int(stats.st_atime)\n mtime = int(stats.st_mtime)\n return (mode, size, mtime, atime)", "def cmd_stat(args):", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * 
self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def _sync_metadata(self, stat):\n self._done = stat.done\n self._all_files_processed = stat.all_files_processed\n self._last_parsing_stat_received_at = time.monotonic()", "def get_stat(self):\n self.filestat = StatTuple()\n return self.filestat", "def _log_file_processing_stats(self, known_file_paths):\n # File Path: Path to the file containing the DAG definition\n # PID: PID associated with the process that's processing the file. May\n # be empty.\n # Runtime: If the process is currently running, how long it's been\n # running for in seconds.\n # Last Runtime: If the process ran before, how long did it take to\n # finish in seconds\n # Last Run: When the file finished processing in the previous run.\n headers = [\"File Path\", \"PID\", \"Runtime\", \"# DAGs\", \"# Errors\", \"Last Runtime\", \"Last Run\"]\n\n rows = []\n now = timezone.utcnow()\n for file_path in known_file_paths:\n last_runtime = self.get_last_runtime(file_path)\n num_dags = self.get_last_dag_count(file_path)\n num_errors = self.get_last_error_count(file_path)\n file_name = os.path.basename(file_path)\n file_name = os.path.splitext(file_name)[0].replace(os.sep, \".\")\n\n processor_pid = self.get_pid(file_path)\n processor_start_time = self.get_start_time(file_path)\n runtime = (now - processor_start_time) if processor_start_time else None\n last_run = self.get_last_finish_time(file_path)\n if last_run:\n seconds_ago = (now - last_run).total_seconds()\n Stats.gauge(f\"dag_processing.last_run.seconds_ago.{file_name}\", seconds_ago)\n\n rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))\n\n # Sort by longest last runtime. 
(Can't sort None values in python3)\n rows.sort(key=lambda x: x[3] or 0.0)\n\n formatted_rows = []\n for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:\n formatted_rows.append(\n (\n file_path,\n pid,\n f\"{runtime.total_seconds():.2f}s\" if runtime else None,\n num_dags,\n num_errors,\n f\"{last_runtime:.2f}s\" if last_runtime else None,\n last_run.strftime(\"%Y-%m-%dT%H:%M:%S\") if last_run else None,\n )\n )\n log_str = (\n \"\\n\"\n + \"=\" * 80\n + \"\\n\"\n + \"DAG File Processing Stats\\n\\n\"\n + tabulate(formatted_rows, headers=headers)\n + \"\\n\"\n + \"=\" * 80\n )\n\n self.log.info(log_str)", "def __init__(self, stats_file):\n stats = dict()\n self._stats = dict()\n\n for line in stats_file:\n stat = next((regex.match(line).groupdict()\n for regex in FUZZER_STATS_RES if regex.match(line)),\n dict())\n stats.update(stat)\n\n if not stats:\n raise Exception('Empty fuzzer_stats file `%s`' % stats_file.name)\n\n # Automatically create class attributes based on the fuzzer_stats fields\n for k, v in stats.items():\n if k == 'command_line':\n afl_opts = None\n target_args = None\n getopt_error = None\n\n for afl_getopt in AFL_GETOPTS:\n try:\n afl_opts, target_args = getopt(v.split(), afl_getopt)\n break\n except GetoptError as e:\n getopt_error = e\n\n if not afl_opts or not target_args:\n raise getopt_error\n\n setattr(self, 'afl_cmdline', afl_opts)\n setattr(self, 'target_cmdline', target_args)\n else:\n # If convertable to a number, treat as a number\n try:\n v = float(v)\n except ValueError:\n pass\n\n setattr(self, k, v)\n self._stats[k] = v", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def process_file_metrics(root_dir, in_file_names, file_processors):\n manager = mp.Manager()\n file_metrics = manager.dict()\n\n parameters = [(root_dir, key, file_metrics, file_processors) for key in in_file_names]\n\n # main loop\n p = mp.Pool(max(1, mp.cpu_count() - 1))\n p.starmap(_process_file_metrics_parallel, parameters)\n p.close()\n p.join()\n\n return file_metrics", "def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))", "def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = 
re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict", "def stats_process():\n nonlocal d_stats, b_status\n log = slog()\n d_stats = self.stats_compute()\n if self.toConsole() or self.args['duf'] or self.args['du']:\n self.dp.qprint(d_stats['report'], level = self.debugLevel)\n slog_filter = filters_show()\n log.title_set('Size statistics')\n if self.args['table3D']: log.render3D()\n log('Total size (raw): %d\\n' % d_stats['totalSize'] )\n log('Total size (friendly): {:,}\\n'.format(d_stats['totalSize']) )\n log('Total size (human): %s\\n' % d_stats['totalSize_human'] )\n log('Total files: %s\\n' % d_stats['files'] )\n log('Total dirs: %s\\n' % d_stats['dirs'] )\n log('Total runtime: %5.3f s' % other.toc() )\n b_status = b_status and d_stats['status']\n return {\n 'status': b_status,\n 'filterLog': slog_filter,\n 'bodyLog': log\n }", "def gather_sample(self, my_file, collector=None):\n\n pass", "def statAggregator(baseFolder):\n\n allStats, overviewStats = getStatsFromSubfolder(baseFolder)\n\n pickle.dump(allStats, open('allStats.p', 'wb'))\n pickle.dump(overviewStats, open('overviewStats.p', 'wb'))\n\n allStats = pickle.load(open('allStats.p', 'rb'))\n overviewStats = pickle.load(open('overviewStats.p', 'rb'))\n\n\n writeStatTables(allStats, overviewStats)", "def report_cpuavg_for_system(stat_path):\n if not os.path.exists(stat_path):\n collectd.error('stat path does not exist: %s' % stat_path)\n return\n\n with open(stat_path, 'r') as stat_file:\n lines = [line for line in stat_file if line.startswith('cpu ')]\n if len(lines) == 1: # There can be only one [cpu avg].\n fields = lines[0].strip().split()\n if len(fields) >= 9:\n submit_cputotal('user', int(fields[1]))\n submit_cputotal('nice', int(fields[2]))\n submit_cputotal('system', int(fields[3]))\n submit_cputotal('idle', int(fields[4]))\n submit_cputotal('wait', int(fields[5]))\n submit_cputotal('interrupt', int(fields[6]))\n submit_cputotal('softirq', int(fields[7]))\n submit_cputotal('steal', int(fields[8]))\n else:\n collectd.warning('Found too few fields (%s) in stat file: %s' %\n (len(fields), stat_path))\n\n submit_cpucores()", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def do_stat(self, arg):\n\t\topts = get_options(parser.parser_stat, arg)\n\t\tif opts is None: return\n\t\tdisplay.print_stats(\n\t\t\t\tself.manager.provide_stats(limit = opts.limit))", "def calculate_statistics(kv, f):\n metadata = dict()\n f.seek(0, SEEK_END)\n b = f.tell()\n f.seek(0)\n metadata['FILE_SIZE_1000'], metadata['FILE_SIZE_1024'] = get_friendly_file_size(b)\n\n if gcode_key.TIME in kv:\n metadata['ETA_SECONDS'] = kv[gcode_key.TIME]\n\n # TODO Distance travelled\n key = 'DISTANCE'\n if key in kv:\n metadata[key] = kv[key]\n\n cubic_volume = get_cubic_volume(kv)\n if cubic_volume is not None:\n metadata['CUBIC_VOLUME'] = cubic_volume\n\n if gcode_key.LAYER_COUNT in kv:\n metadata['LAYER_COUNT'] = kv[gcode_key.LAYER_COUNT]\n\n if gcode_key.FILAMENT_USED in kv:\n 
metadata['FILAMENT_USED'] = kv[gcode_key.FILAMENT_USED][:-1] + ' m'\n\n return metadata", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def read_statistics(self):\n self.psdata=[]\n self.powerspectra=[]\n self.ds=[]\n self.dsigmasq=[]\n self.dsigma=[]\n self.bsdata=[]\n self.eqbispectra=[]\n self.fNLeq=[]\n\n for sub in range(self.Nsubs):\n self.psdata.append(np.load(self.datadir+self.filebase+\"_\"+str(sub)+\".npy\"))\n self.powerspectra.append(np.trim_zeros(self.psdata[-1][0][1:]))\n self.bsdata.append(np.load(self.datadir+self.fbbispec+\"_\"+str(sub)+\".npy\"))\n self.eqbispectra.append(self.bsdata[-1][0][1:len(self.powerspectra[-1])])\n\n self.ds.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[0])\n self.dsigmasq.append(np.load(self.datadir+\"stat_\"+str(sub)+\".npy\")[1])\n self.dsigma = np.array([np.sqrt(dsq) for dsq in self.dsigmasq])\n\n self.klist=np.arange(1, len(self.powerspectra[-1]))*(2.*np.pi/self.Lsub)\n # subtract the mean ds\n self.ds = self.ds - np.mean(self.ds)\n self.fNLeq=np.mean(self.eqbispectra, axis=0)\n self.fNLeqsubs=np.mean(self.eqbispectra, axis=1)\n self.fNLeqds=[]\n for i in range(len(self.eqbispectra)):\n self.fNLeqds.append(np.array([self.ds[i]*self.eqbispectra[i][j] for j in range(45)]))", "def calculate_stats(file_data: dict) -> dict:\n specifics = {\n 'assignments': 0,\n 'grade': 0,\n 'graded': 0,\n 'discussion': 0\n }\n for course in file_data['semester_no_dup_crn']:\n x = course.split(DELIMITER)\n if int(x[ASSIGNMENTS]) > 0:\n specifics['assignments'] += 1\n if int(x[GRADE]) > 2:\n specifics['grade'] += 1\n if int(x[GRADED]) > 0:\n specifics['graded'] += 1\n if int(x[DISCUSSION]) > 0:\n specifics['discussion'] += 1\n return {'semester': file_data['semester'],\n 'courses_with_usage': len(file_data['semester_no_dup_crn']),\n 'faculty_with_usage': len(file_data['semester_no_dup_r']),\n 'full_time': len(file_data['full_time']),\n 'total_full_time': file_data['len_full'],\n 'part_time': len(file_data['part_time']),\n 'total_part_time': file_data['len_part'],\n 'staff': len(file_data['staff']),\n 'specifics': specifics,\n 'total_courses': file_data['total_courses']}", "def tally_stats(hdf5_file):\n Stat = namedtuple('Stat', ['cr_count',\n 'img_count',\n 'total_exptime'])\n\n with h5py.File(hdf5_file,mode='r') as f:\n instr = list(f.keys())[0]\n print(instr)\n grp = f['/{}/sizes'.format(instr)]\n num_images = 0\n num_cr = 0\n total_exptime = 0\n for key in grp.keys():\n dset = grp[key][...]\n attrs = grp[key].attrs\n # print(list(attrs.items()))\n num_cr += dset.shape[1]\n num_images += 1\n total_exptime += attrs['exptime']\n\n result = Stat(cr_count=num_cr,\n img_count=num_images,\n total_exptime=total_exptime)\n\n return instr, result", "def init_cur_stats(self):\n self._cur_stats = defaultdict(lambda: defaultdict(int))\n self._cur_stats[\"writes\"][\"/\"] = 0\n self._cur_stats[\"reads\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writes\"] = 0\n self._cur_stats[\"total\"][\"/reads\"] = 0\n\n if self._include_bytes:\n self._cur_stats[\"writesBytes\"][\"/\"] = 0\n self._cur_stats[\"readsBytes\"][\"/\"] = 0\n self._cur_stats[\"total\"][\"/writeBytes\"] = 0\n self._cur_stats[\"total\"][\"/readBytes\"] = 0", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), 
s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def analyze_files(self):\n for file in os.listdir(self.directory):\n if file[-3:] == (\".py\"):\n fopen = open(os.path.join(self.directory, file), \"r\")\n try:\n if not (py_file := fopen):\n raise FileNotFoundError\n\n with py_file: # close file after opening\n class_count: int = 0\n fun_count: int = 0\n l_count: int = 0\n ch_count: int = 0\n for line in py_file: # calculate values for the file\n if line.strip().startswith(\"class \"):\n class_count = class_count+1\n elif line.strip().startswith(\"def \"):\n fun_count = fun_count+1\n\n l_count = l_count+1\n ch_count = ch_count+len(line)\n\n self.files_summary[str(os.path.join(self.directory, file))] = {\"class\": class_count, \"function\": fun_count, \"line\": l_count,\n \"char\": ch_count}\n except FileNotFoundError:\n print(f\"File {py_file} is not found or can not be opened\")\n fopen.close()", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def get_member_stats(self):\n self.mstats = {}\n # add in members from expanded_def (which includes any merges)\n for qid in self.expanded_def.keys():\n # check for trailing quantity specifier (!, *, +, ?). Not for name space.\n # ! - required (default), * - 0 or more, + - 1 or more, ? 
- 0 or 1\n id, qty = self.file.parse_qty(qid, \"!\")\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group\" % id\n traceback.print_stack()\n sys.exit(1)\n type = 'group' if id.endswith('/') else 'dataset'\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': self.expanded_def[qid], 'created': [], 'type': type }\n # add in members from any includes\n # print \"** processing includes\"\n for qidq in self.includes:\n qid, qty = self.file.parse_qty(qidq, \"!\")\n # print \"processing include\", qid\n sdef = self.file.get_sdef(qid, self.sdef['ns'], \"Referenced in include\")\n # print \"obtained sdef:\"\n # pp.pprint(sdef)\n modifiers = self.includes[qidq]\n if len(modifiers) > 0:\n # need to incorporate modifications to definition of included child members\n df = copy.deepcopy(sdef['df'])\n # self.modify(df, modifiers)\n self.merge(df, modifiers) # merges modifiers into definition\n # print \"df after merging modifiers:\"\n else:\n df = sdef['df']\n # print \"df after copy:\"\n id = sdef['id']\n type = sdef['type']\n # pp.pprint(df)\n # qty = '!' # assume includes are required\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group, referenced by include\" % id\n traceback.print_stack()\n sys.exit(1)\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': df, 'created': [], 'type': type }\n # print \"after processing all includes, mstats is:\"\n # pp.pprint(self.mstats)", "def parse(self):\n\n # if data has already been parsed, do nothing\n if self._data:\n return\n\n stats = {\n \"genres\": {},\n \"artists\": {},\n \"global_stats\": {\n \"songs\": 0,\n \"lines\": 0,\n \"words\": 0\n }\n }\n try:\n with open(self._filename) as file:\n objects = ijson.items(file, \"item\")\n\n # compute metrics\n for object in objects:\n\n lines = len(object[\"lyrics\"])\n words = sum([len(line.split()) for line in object[\"lyrics\"]])\n\n genre = object[\"genre\"]\n stats[\"genres\"][genre] = stats[\"genres\"].get(genre, {\"artists\": {}})\n genre_obj = stats[\"genres\"][genre]\n genre_obj[\"songs\"] = genre_obj.get(\"songs\", 0) + 1\n genre_obj[\"lines\"] = genre_obj.get(\"lines\", 0) + lines\n genre_obj[\"words\"] = genre_obj.get(\"words\", 0) + words\n genre_obj[\"is_music\"] = genre_obj.get(\"is_music\", 0)\n if object[\"is_music\"] != \"false\":\n genre_obj[\"is_music\"] += 1\n\n artist = object[\"artist\"]\n stats[\"artists\"][artist] = stats[\"artists\"].get(artist, 0) + 1\n stats[\"genres\"][genre][\"artists\"][artist] = stats[\"genres\"][genre][\"artists\"].get(artist, 0) + 1\n\n # update global stats\n stats[\"global_stats\"][\"songs\"] += 1\n stats[\"global_stats\"][\"lines\"] += lines\n stats[\"global_stats\"][\"words\"] += words\n\n # calculate averages for each genre\n for genre, genre_stats in stats[\"genres\"].items():\n genre_stats[\"avg_line_length\"] = genre_stats[\"words\"] / genre_stats[\"lines\"]\n genre_stats[\"avg_lines\"] = genre_stats[\"lines\"] / genre_stats[\"songs\"]\n genre_stats[\"avg_words\"] = genre_stats[\"words\"] / genre_stats[\"songs\"]\n\n # calculate global averages\n stats[\"global_stats\"][\"avg_line_length\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"lines\"]\n stats[\"global_stats\"][\"avg_lines\"] = stats[\"global_stats\"][\"lines\"] / stats[\"global_stats\"][\"songs\"]\n stats[\"global_stats\"][\"avg_words\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"songs\"]\n\n self._data = stats\n\n except IOError as e:\n print(\"Exception occurred: \", e)", "def 
collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. ({count:,}/{total:,})\")", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_run_metrics_handle(run_dir):\n #print(\"Examining: {}\".format(run_dir))\n\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n for v2l in (py_interop_run.Tile, py_interop_run.ExtendedTile):\n valid_to_load[v2l] = 1\n\n run_metrics = py_interop_run_metrics.run_metrics()\n run_metrics.read(run_dir, valid_to_load)\n\n return run_metrics", "def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')", "def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))", "def get_proc_stats(proc):\n file_size = os.path.getsize(proc['filename'])\n return {\n 'file_size': file_size,\n 
'formatted_file_size': size(file_size),\n 'started_at': time.strftime(\n \"%H:%M\", time.localtime(proc['time'])),\n 'recording_time': str(\n timedelta(seconds=int(time.time()) - proc['time']))\n }", "def stats_compute(self, *args, **kwargs):\n totalElements = 0\n totalKeys = 0\n totalSize = 0\n l_stats = []\n d_report = {}\n str_report = \"\"\n l_range = []\n\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse),\n desc = ' Processing stats')\n else:\n l_range = sorted(self.d_inputTreeCallback.items(),\n key = lambda kv: (kv[1]['diskUsage_raw']),\n reverse = self.b_statsReverse)\n\n for k, v in l_range:\n try:\n if not self.args['du'] and not self.args['duf']:\n str_report += \"files: %5d│ raw_size: %12d│ human_size: %8s│ dir: %s\\n\" % (\\\n len(self.d_inputTree[k]),\n self.d_inputTreeCallback[k]['diskUsage_raw'],\n self.d_inputTreeCallback[k]['diskUsage_human'],\n k)\n else:\n str_report += '%-10s%s\\n' % (\n self.d_inputTreeCallback[k]['diskUsage_human'], k)\n except:\n pass\n d_report = {\n 'files': len(self.d_inputTree[k]),\n 'diskUsage_raw': self.d_inputTreeCallback[k]['diskUsage_raw'],\n 'diskUsage_human': self.d_inputTreeCallback[k]['diskUsage_human'],\n 'path': k\n }\n l_stats.append(d_report)\n totalElements += len(v)\n totalKeys += 1\n totalSize += self.d_inputTreeCallback[k]['diskUsage_raw']\n str_totalSize_human = self.sizeof_fmt(totalSize)\n return {\n 'status': True,\n 'report': str_report,\n 'dirs': totalKeys,\n 'files': totalElements,\n 'totalSize': totalSize,\n 'totalSize_human': str_totalSize_human,\n 'l_stats': l_stats,\n 'runTime': other.toc()\n }", "def samtools_stats(filename):\n stats, err = Popen([\"samtools\",\"stats\",filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != \"\":\n raise Exception(err)\n stats = [x.split(\"\\t\") for x in stats.split(\"\\n\")]\n chksum = [x for x in stats if x[0].startswith(\"CHK\")][0]\n stats = dict([(x[1].replace(\":\",\"\"),set_type(x[2]),) for x in stats if x[0].startswith(\"SN\")])\n stats[\"filename\"] = filename\n stats[\"chksum_read_names\"] = chksum[1]\n stats[\"chksum_sequences\"] = chksum[2]\n stats[\"chksum_qualities\"] = chksum[3]\n return stats", "def read_cpu_stats(target_file):\n test_line = target_file.readline()\n if \"CPU\" in test_line:\n logical_processors = target_file.readline().strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n if \"logical processors\" in test_line:\n logical_processors = test_line.strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n return CpuStats('', '', '')", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != 
-1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result", "def get_file_stat(host, fqpath):\n statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'\n command = \"stat -c '%s' %s\" % (statformat, fqpath)\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n stat_data = {}\n stat_string = rout.strip()\n (filetype, filename, inode,\n access, size, links,\n uid, gid, username, groupname) = stat_string.split(\":\")\n\n stat_data['filetype'] = filetype\n stat_data['filename'] = filename\n stat_data[\"inode\"] = inode\n stat_data[\"access\"] = access\n stat_data[\"size\"] = size\n stat_data[\"links\"] = links\n stat_data[\"username\"] = username\n stat_data[\"groupname\"] = groupname\n stat_data[\"uid\"] = uid\n stat_data[\"gid\"] = gid\n\n return stat_data\n\n g.log.error(\"Could not stat file %s: %s\" % (fqpath, rerr))\n return None", "def sli_stats(dir):\n walker = ana.SittingWalker.from_dir(dir)\n dao = ana.RoboDAO(dir)\n stats = {}\n\n for sitting in walker:\n person = dao.get(sitting['person'])\n stamp = datetime.strptime(sitting['starttime'], \"%Y-%m-%dT%H:%M:%SZ\")\n [year, weekno, dayofweek] = stamp.isocalendar()\n ano = ident = inclass = teacher = 0\n if person.isteacher:\n teacher = 1\n elif len(person.teachers) > 0:\n inclass = 1\n elif person.id[0] == 'A':\n ano = 1\n else:\n ident = 1\n # score [all, ano, indent, inclass, teacher]\n score = [1, ano, ident, inclass, teacher]\n do_score(stats, 'weekofyear', year, weekno, score)\n do_score(stats, 'dayofweek', year, dayofweek, score)\n do_score(stats, 'hourofday', year, stamp.hour, score)\n return stats", "def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n 
self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, 
v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def store_metrics_to_params(self):\n\n model = self.model_name\n\n if self.stats_path.exists():\n with open(self.stats_path, \"rb\") as f:\n stats_dict = pickle.load(f)\n else:\n stats_dict = {}\n\n if model not in stats_dict:\n stats_dict[model] = defaultdict(list)\n\n stats_dict[model]['amine'].append(self.amine)\n stats_dict[model]['accuracies'].append(self.metrics['accuracies'])\n stats_dict[model]['confusion_matrices'].append(\n self.metrics['confusion_matrices'])\n stats_dict[model]['precisions'].append(self.metrics['precisions'])\n stats_dict[model]['recalls'].append(self.metrics['recalls'])\n stats_dict[model]['bcrs'].append(self.metrics['bcrs'])\n\n # Save this dictionary in case we need it later\n with open(self.stats_path, \"wb\") as f:\n pickle.dump(stats_dict, f)", "def und_generate_metrics(udb_file):\n log.info(f\"Running Analysis for commit: {udb_file} ...\")\n # stdout=subprocess.DEVNULL makes silent the stdout ,\n subprocess.call(f\"und analyze -db {udb_file}\", stdout=subprocess.DEVNULL)\n log.info(\"Calculating metrics and creating csv\")\n subprocess.call(f\"und metrics {udb_file}\")", "def read_stats(filename):\n header = {}\n tableinfo = {}\n measures = []\n rowmeasures = []\n\n with open(filename, 'rt') as fp:\n lines = fp.readlines()\n for line in lines:\n if line == line[0]:\n continue\n #parse commented header\n if line.startswith('#'):\n fields = line.split()[1:]\n if len(fields) < 2:\n continue\n tag = fields[0]\n if tag == 'TableCol':\n col_idx = int(fields[1])\n if col_idx not in tableinfo:\n tableinfo[col_idx] = {}\n tableinfo[col_idx][fields[2]] = ' '.join(fields[3:])\n if tableinfo[col_idx][fields[2]] == \"StructName\":\n struct_idx = col_idx\n elif tag == \"Measure\":\n fields = ' '.join(fields).replace('CortexVol ', 'CortexVol, ').split()\n fields = ' '.join(fields[1:]).split(', ')\n measures.append({'structure': fields[0],\n 'name': fields[1],\n 'description': fields[2],\n 'value': fields[3],\n 'units': fields[4],\n 'source': 'Header'})\n elif tag == \"ColHeaders\":\n if len(fields) != len(tableinfo):\n for idx, fieldname in enumerate(fields[1:]):\n if idx + 1 in tableinfo:\n continue\n tableinfo[idx + 1] = {'ColHeader': fieldname,\n 'Units': 'unknown',\n 'FieldName': fieldname}\n else:\n continue\n else:\n header[tag] = ' '.join(fields[1:])\n else:\n #read values\n row = line.split()\n values = {}\n measures.append({'structure': row[struct_idx-1],\n 'items': [],\n 'source': 'Table'}),\n for idx, value in enumerate(row):\n if idx + 1 == struct_idx:\n continue\n measures[-1]['items'].append({\n 'name': tableinfo[idx + 1]['ColHeader'],\n 'description': tableinfo[idx + 1]['FieldName'],\n 'value': value,\n 'units': tableinfo[idx + 1]['Units'],\n })\n return header, tableinfo, measures", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def statistics(self):\n \n u_self = 
resource.getrusage(resource.RUSAGE_SELF)\n\tu_children = resource.getrusage(resource.RUSAGE_CHILDREN)\n\t\n\tpath = os.getenv('TMPDIR')\n\tif not path:\n\t path = os.getcwd()\n\t \n\tdisk = 0 \n\tfor root, dirs, files in os.walk(path): \n\t for d in dirs+files:\n\t disk += os.stat(os.path.join(root, d)).st_size\n\n return dict(\n\t cpu = u_self[0]+u_self[1]+u_children[0]+u_children[1],\n\t memory = (u_self[2]+u_children[2])*resource.getpagesize(),\n\t disk = disk,\n\t time = self.elapsed_time(),\n\t signal = self.signal\n\t)", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def run_all_times(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info(cur_filename + '|' + cur_filename +\n ': Starting tc_stat_wrapper...')\n if self.by_config:\n self.set_envs()\n if not self.config_lists_ok():\n self.log_error('There is at least one <>_VAL/<>_NAME pair'\n 'requested in the MET tc-stat config '\n 'file where the size of the lists '\n 'is not equal. Please '\n 'check your MET tc-stat config file.')\n sys.exit(1)\n\n # Don't forget to create the output directory, as MET tc_stat will\n # not do this.\n mkdir_p(self.c_dict['OUTPUT_DIR'])\n\n # Since this is different from the other MET tools, we will build\n # the commands rather than use command builder's methods.\n match_points = str(self.c_dict['MATCH_POINTS'])\n if self.by_config:\n # Running with config file\n\n tc_cmd_list = [self.tc_exe,\n \" -lookin\", self.c_dict['INPUT_DIR'],\n \" -config \", self.c_dict['CONFIG_FILE'],\n self.c_dict['JOBS_LIST']]\n else:\n # Run single job from command line\n tc_cmd_list = [self.tc_exe,\n \" -lookin\", self.c_dict['INPUT_DIR'],\n self.c_dict['CMD_LINE_JOB'],\n \"-match_points\", match_points]\n\n tc_cmd_str = ' '.join(tc_cmd_list)\n\n # Since this wrapper is not using the CommandBuilder to build the cmd,\n # we need to add the met verbosity level to the MET cmd created before\n # we run the command.\n tc_cmd_str = self.cmdrunner.insert_metverbosity_opt(tc_cmd_str)\n\n # Run tc_stat\n try:\n (ret, cmd) = \\\n self.cmdrunner.run_cmd(tc_cmd_str, self.env, app_name=self.app_name)\n if not ret == 0:\n raise ExitStatusException(\n '%s: non-zero exit status' % (repr(cmd),), ret)\n except ExitStatusException as ese:\n self.log_error(ese)\n\n return 0" ]
[ "0.6764025", "0.67190164", "0.6688609", "0.65135384", "0.6446264", "0.6374985", "0.63547957", "0.6333468", "0.63260734", "0.6261242", "0.6237093", "0.6232534", "0.61950576", "0.618388", "0.6182165", "0.6152383", "0.6111384", "0.6086403", "0.6057407", "0.6038634", "0.60113156", "0.60033584", "0.59586376", "0.5924182", "0.59238905", "0.5915536", "0.5908617", "0.5899573", "0.58947885", "0.58782625", "0.58739007", "0.58667886", "0.58425117", "0.58363676", "0.5816476", "0.5814599", "0.5808384", "0.57770455", "0.57702845", "0.5737572", "0.5731689", "0.57253677", "0.5725337", "0.5723464", "0.57070893", "0.5658338", "0.56426424", "0.5641623", "0.5615165", "0.56019306", "0.55860656", "0.5578605", "0.5544316", "0.5540564", "0.55301654", "0.55296326", "0.5528872", "0.5528162", "0.5525054", "0.5505867", "0.55031115", "0.55010104", "0.54997313", "0.5487103", "0.5484262", "0.5481081", "0.54798824", "0.5474314", "0.54685557", "0.5467209", "0.54566526", "0.5443085", "0.54391307", "0.5434449", "0.54342777", "0.5429209", "0.5427282", "0.54255927", "0.54247016", "0.5420793", "0.54195863", "0.541572", "0.5411072", "0.5398803", "0.5395101", "0.5390877", "0.5388187", "0.538809", "0.5384381", "0.5366538", "0.53586876", "0.5353811", "0.5344223", "0.53376335", "0.53356105", "0.5330973", "0.53214604", "0.5318627", "0.5309043", "0.53041494" ]
0.7137697
0
Gathers the metrics from the status file.
def gather_sample(self, stat_file, collector=None): if not collector: collector = {} for line in stat_file: # Each line has a format of: # Tag: Value # # We parse out all lines looking like that and match the stats we care about. m = re.search(r"^(\w+):\s*(\d+)", line) if m is None: continue field_name = m.group(1) int_value = int(m.group(2)) # FDSize is not the same as the number of open file descriptors. Disable # for now. # if field_name == "FDSize": # self.print_sample("app.fd", int_value) if field_name == "VmSize": collector.update({Metric("app.mem.bytes", "vmsize"): int_value * 1024}) elif field_name == "VmPeak": collector.update( {Metric("app.mem.bytes", "peak_vmsize"): int_value * 1024} ) elif field_name == "VmRSS": collector.update( {Metric("app.mem.bytes", "resident"): int_value * 1024} ) elif field_name == "VmHWM": collector.update( {Metric("app.mem.bytes", "peak_resident"): int_value * 1024} ) return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. 
on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def get_status(self):\n # find status\n # search in summary file first\n self.status = \"running\"\n status = self.search_summary(\"status\")\n if status:\n self.status = status.split()[1]\n # define running time\n # search in summary file first\n self.running_time = \"00:00:00\"\n running_time = self.search_summary(\"running-time\")\n if running_time:\n self.running_time = running_time.split()[1]\n # calculate running time\n else:\n now = datetime.datetime.now()\n elapsed_time = (now - self.ctime).seconds\n hours, remainder = divmod(elapsed_time, 3600)\n minutes, seconds = divmod(remainder, 60)\n self.running_time = (\n f\"{int(hours):02}:{int(minutes):02}:{int(seconds):02}\"\n )", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' 
in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def read_metrics(self):\n raise NotImplementedError()", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def load_status(self):\n # jIn = os.path.join(self.meta[\"wdir\"], meta_file)\n with open(self.meta_filepath, \"r\") as f:\n tmp = json.load(f)\n if \"status\" in tmp.keys():\n # self.meta[\"status\"] = tmp[\"status\"]\n return tmp[\"status\"]", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n 
self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def get_nag_status(filename, threshold = 0):\n status_file = filename\n\n f = open(status_file, 'r')\n\n line = f.readline()\n\n host_statuses = {}\n\n this_host = None\n this_service = None\n group_type = None\n\n for line in f:\n if line.strip().endswith('{'):\n group_type = line.strip().split()[0]\n continue\n try:\n this_property, value = get_property(line) #fails on lines without =, the try makes us pass\n #not yet reading programstatus or info\n if group_type == 'hoststatus':\n if this_property == 'host_name':\n this_host = value\n host_statuses[this_host] = {}\n host_statuses[this_host]['HOST'] = {}\n host_statuses[this_host]['HOST']['service_comments'] = {}\n else:\n host_statuses[this_host]['HOST'][this_property] = try_to_convert(value)\n elif group_type == 'servicestatus':\n #host_name always comes before service_description\n if this_property == 'host_name':\n this_host = value\n elif this_property == 'service_description':\n this_service = value\n host_statuses[this_host][this_service] = {}\n host_statuses[this_host][this_service][this_property] = value #handy place to have the service description and host name\n host_statuses[this_host][this_service]['host_name'] = this_host\n host_statuses[this_host][this_service]['service_comments'] = {}\n else:\n host_statuses[this_host][this_service][this_property] = try_to_convert(value)\n if this_property == 'current_state' and host_statuses[this_host][this_service][this_property] < threshold:\n #by simply removing the service here, subsequent attempts to add data fail to the next loop iteration\n del host_statuses[this_host][this_service]\n elif this_property == 'last_state_change':\n host_statuses[this_host][this_service]['current_duration'] = time.time() - try_to_convert(value)\n elif group_type == 'servicecomment':\n if this_property == 'host_name':\n this_host = value\n elif this_property == 'service_description':\n this_service = value\n elif this_property == 'entry_type':\n # Need to hang on to this one for one more line\n this_entry_type = try_to_convert(value)\n elif this_property == 'comment_id':\n this_comment_id = value\n host_statuses[this_host][this_service]['service_comments'][value] = {\n 'entry_type': this_entry_type,\n 'comment_id': this_comment_id\n }\n else:\n host_statuses[this_host][this_service]['service_comments'][this_comment_id][this_property] = try_to_convert(value)\n elif group_type == 'hostcomment':\n if this_property == 'host_name':\n this_host = value\n this_service = 'HOST'\n elif this_property == 'entry_type':\n # Need to hang on to this one for one more line\n this_entry_type = try_to_convert(value)\n elif this_property == 'comment_id':\n this_comment_id = value\n host_statuses[this_host][this_service]['service_comments'][value] = {\n 'entry_type': this_entry_type,\n 'comment_id': this_comment_id\n }\n else:\n host_statuses[this_host][this_service]['service_comments'][this_comment_id][this_property] = try_to_convert(value)\n except:\n pass\n f.close()\n return host_statuses", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n 
collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters 
= json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def statusupdate(filepath):\n pass", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def do_GET(self):\n for i in range(0,5):\n \"\"\" gather status update time\"\"\"\n f = open(STATUSTIME, \"rb\")\n try:\n mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)\n last = int(mm.readline())\n mm.seek(0)\n mm.close()\n except ValueError as e:\n print(e.message + str(i) + ' failed to read status time')\n continue\n f.close()\n \"\"\" gather json status \"\"\"\n st = open(STATUSFILE, \"rb\")\n try:\n buf = mmap.mmap(st.fileno(), 0, access=mmap.ACCESS_READ)\n raw = (buf.read(len(buf)))\n #print('reading status ' + hashlib.sha1(raw).hexdigest())\n except ValueError as e:\n print(e.message + str(i) + ' failed to read json status')\n continue\n data = None\n if raw is not None:\n try:\n data = raw\n #data = json.loads(raw)\n except ValueError as e:\n print(e.message + str(i) + ' failed to load json status')\n continue\n \"\"\" all done - exit for loop\"\"\"\n break\n else:\n print('all attempts failed')\n self.send_response(500)\n self.end_headers()\n self.wfile.write('\\n')\n return\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n# message = threading.currentThread().getName() + ' ' + str(last) + ' ' +str(data)\n# message = str(raw)\n message = str(data)\n \n self.wfile.write(message)\n self.wfile.write('\\n')\n return", "def update_status_metrics(status: EnodebStatus) -> None:\n # Call every second\n metrics_by_stat_key = {\n 'enodeb_connected': metrics.STAT_ENODEB_CONNECTED,\n 'enodeb_configured': metrics.STAT_ENODEB_CONFIGURED,\n 'opstate_enabled': metrics.STAT_OPSTATE_ENABLED,\n 'rf_tx_on': metrics.STAT_RF_TX_ENABLED,\n 'gps_connected': metrics.STAT_GPS_CONNECTED,\n 'ptp_connected': metrics.STAT_PTP_CONNECTED,\n 'mme_connected': metrics.STAT_MME_CONNECTED,\n }\n\n def get_metric_value(enodeb_status, key):\n # Metrics are \"sticky\" when synced to the cloud - if we don't\n # receive a status update from enodeb, set the metric to 0\n # to explicitly indicate that it was not received, otherwise the\n # metrics collector will continue to report the last value\n if key not in enodeb_status:\n return 0\n\n try:\n return int(enodeb_status[key])\n except ValueError:\n logging.error('Could not cast metric value %s to int',\n enodeb_status[key])\n return 0\n\n for stat_key, metric in metrics_by_stat_key.items():\n metric.set(get_metric_value(status, stat_key))", "def 
gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector", "def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if SENDING in line:\n self._req_set.add(self._get_request(line, True))\n line = file.readline()\n except Exception as err:\n print(\"Failed to read garbage collector log. 
Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "async def status(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n up_time = time.time() - self.start_time\n m, s = divmod(up_time, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n stime = time.time() - psutil.boot_time()\n m, s = divmod(stime, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n system_uptime_string = \"%d days %d hours %d minutes %d seconds\" % (d, h, m, s)\n\n mem = psutil.virtual_memory()\n\n pid = os.getpid()\n memory_use = psutil.Process(pid).memory_info()[0]\n\n content = discord.Embed(title=f\"Miso Bot | version {main.version}\")\n content.set_thumbnail(url=self.client.user.avatar_url)\n\n content.add_field(name=\"Bot process uptime\", value=uptime_string)\n content.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent()}%\")\n content.add_field(name=\"System uptime\", value=system_uptime_string)\n\n content.add_field(name=\"System RAM Usage\", value=f\"{mem.percent}%\")\n content.add_field(name=\"Bot memory usage\", value=f\"{memory_use/math.pow(1024, 2):.2f}MB\")\n\n await ctx.send(embed=content)", "def get_run_metrics_handle(run_dir):\n #print(\"Examining: {}\".format(run_dir))\n\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n for v2l in (py_interop_run.Tile, py_interop_run.ExtendedTile):\n valid_to_load[v2l] = 1\n\n run_metrics = py_interop_run_metrics.run_metrics()\n run_metrics.read(run_dir, valid_to_load)\n\n return run_metrics", "def __process_health(self) -> None:\n status = self.metrics.get(\"Status\", None)\n if status:\n health = status.get(\"Health\", None)\n measurement = \"Health\"\n if health == \"Warning\":\n value = 1\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n elif health == \"Critical\":\n value = 2\n datapoint = self.__gen_datapoint(measurement, self.label, value)\n self.datapoints.append(datapoint)\n return", "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. 
count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def _init_status(self):\n\n status = self._get_status_obj()\n\n for i, step in enumerate(self._run_list):\n\n for module in step.keys():\n module_dict = {module: {'pipeline_index': i}}\n status.data = status.update_dict(status.data, module_dict)\n\n status._dump()", "def test_tableau_server_parse_status_metrics(self):\n xml_response = self.init_default_check()\n got = TableauServerStatusParser.tableau_server_parse_status_metrics(xml_response=xml_response[0])\n expected = 'tableau_server_process_status'\n self.assertEqual(expected, got.name)", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def get_prom_metrics(self):\n base_url = self.get_config().get(\"prometheus_endpoint\", PROM_BASE_URL).rstrip(\"/\")\n\n url = \"%s%slabel/__name__/values\" % (base_url, PROM_API_PATH)\n\n self.debug(\"Getting url: \", url)\n r = requests.get(url)\n\n assert r.status_code == 200, \"Prometheus server returned http code: \" + str(r.status_code)\n\n try:\n data = r.json()\n except:\n raise Exception(\"Failed to parse Prometheus JSON response\")\n\n self.debug(\"Got reponse data: \", data)\n\n assert (\"status\" in data and data[\"status\"] == \"success\"), \"Prometheus server did not return status success\"\n assert \"data\" in data, \"Prometheus server did not return data in output\"\n assert len(data[\"data\"]) > 0, \"Prometheus server returned no metrics\"\n\n known_metrics = data[\"data\"]\n assert isinstance(known_metrics, list)", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n 
used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def stats(self):\n pass", "def upload_statistics(self):\n logger.info('Importing statistics...')\n call_command('import_qc', self.accession, self.rootpath, '--pipeline', self.version)\n logger.info('Stats successfully imported.')", "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def test_status():\n status = {'INFO': 0, \"WARNING\": 0, \"ERROR\": 0}\n try:\n with open(\"test.log\",'r') as log_file:\n for line in log_file:\n if \"INFO :\" in line:\n status[\"INFO\"] += 1\n elif \"WARNING :\" in line:\n status[\"WARNING\"] += 1\n elif \"ERROR :\" in line:\n status[\"ERROR\"] += 1\n\n except FileNotFoundError as e:\n raise FileNotFoundError(\"File Not present with the given name\")\n\n assert status[\"INFO\"] == 50\n assert status[\"WARNING\"] == 11\n assert status[\"ERROR\"] == 3\n print (status)", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def prometheus_metrics(request):\n if not settings.DEBUG:\n return HttpResponseNotFound()\n\n # DEPRECATED: prometheus_multiproc_dir has been replaced by PROMETHEUS_MULTIPROC_DIR\n if \"PROMETHEUS_MULTIPROC_DIR\" in os.environ or \"prometheus_multiproc_dir\" in os.environ:\n registry = prometheus_client.CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n else:\n registry = prometheus_client.REGISTRY\n metrics_page = prometheus_client.generate_latest(registry)\n return HttpResponse(\n metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST\n )", "def status():\n schedule_log(\"Starting Elasticsearch Monitor\")\n\n command_text = 'curl http://127.0.0.1:9200/_stats'\n\n schedule_log('Running: %s' % command_text)\n\n output, error = safe_run(command_text)\n\n try:\n data = json.loads(output)\n\n schedule_log('Loaded json, saving.')\n\n save(True, output, mongo_database(), mongo_collection(), output)\n except Exception as ex:\n schedule_log('Reporting as failed.')\n schedule_log('%s' % ex)\n schedule_log(output)\n error = '%s'\n\n if error:\n save(False, {}, mongo_database(), mongo_collection(), error)\n\n schedule_log('Finished')", 
"def compute_metrics(self):\n pass", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def list_metrics(self):\n pass", "def update_statistics(status):\n if not os.path.isfile(CONFIG['stats_file']):\n current_stats = {}\n else:\n current_stats = json.loads(open(CONFIG['stats_file'], 'r').read())\n # current_stats = delete_old_statistics(current_stats)\n\n current_key = int(datetime.datetime.now().strftime('%Y%m%d%H%M'))\n for host, state in ((h['host'], h['status']) for h in status):\n if host not in current_stats:\n current_stats[host] = {}\n\n # get newest entry of host\n newest_state = None, None\n for key, entry in current_stats[host].items():\n if newest_state[0] is None or int(key) > int(newest_state[0]):\n newest_state = key, entry\n if newest_state[1] != state:\n # state has changed. Write it.\n current_stats[host][current_key] = state\n\n # write stats\n open(CONFIG['stats_file'], 'w').write(json.dumps(current_stats))", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def usagestats_parse(dirpath):\r\n # Create database\r\n # TODO: change to an easier format, probably json.\r\n db, cursor = create_table()\r\n\r\n # Some vars for logging\r\n processed = 0\r\n err = 0\r\n\r\n # Iterate through the /usagestats/ directory and fetch all files\r\n for root, dirnames, filenames in os.walk(dirpath, topdown=True, onerror=None, followlinks=False):\r\n if 'daily' in root or 'weekly' in root or 'monthly' in root or 'yearly' in root:\r\n # Retrieve the folder name to save what the frequency of the usagestats were:\r\n frequency = root.split('/')[-1]\r\n for filename in filenames:\r\n # Check if filename is only numbers (which is an epoch time representation)\r\n if filename.isnumeric():\r\n try:\r\n tree = ET.parse(os.path.join(root, filename))\r\n except ET.ParseError:\r\n parse_file_with_protobuf(os.path.join(root, filename), db)\r\n continue\r\n\r\n # We have sucessfully parsed the usagestats xml.\r\n # So continue processing\r\n tree_root = tree.getroot()\r\n\r\n for elem in tree_root:\r\n parse_sub_elements(frequency, elem, filename, db)\r\n\r\n # query for reporting\r\n cursor.execute('''\r\n select \r\n usage_type,\r\n datetime(lastime/1000, 'UNIXEPOCH', 'localtime') as lasttimeactive,\r\n timeactive as time_Active_in_msecs,\r\n timeactive/1000 as timeactive_in_secs,\r\n case last_time_service_used WHEN '' THEN ''\r\n ELSE datetime(last_time_service_used/1000, 'UNIXEPOCH', 'localtime')\r\n end last_time_service_used,\r\n case last_time_visible WHEN '' THEN ''\r\n ELSE datetime(last_time_visible/1000, 'UNIXEPOCH', 'localtime') \r\n end last_time_visible,\r\n total_time_visible,\r\n app_launch_count,\r\n package,\r\n CASE types\r\n WHEN '1' THEN 'MOVE_TO_FOREGROUND'\r\n 
WHEN '2' THEN 'MOVE_TO_BACKGROUND'\r\n WHEN '5' THEN 'CONFIGURATION_CHANGE'\r\n WHEN '7' THEN 'USER_INTERACTION'\r\n WHEN '8' THEN 'SHORTCUT_INVOCATION'\r\n ELSE types\r\n END types,\r\n classs,\r\n source,\r\n fullatt\r\n from data\r\n order by lasttimeactive DESC\r\n ''')\r\n all_rows = cursor.fetchall()\r\n\r\n # HTML report section\r\n h = open('./Report.html', 'w')\r\n h.write('<html><body>')\r\n h.write('<h2>Android Usagestats report (Dates are localtime!)</h2>')\r\n h.write('<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>')\r\n h.write('<br />')\r\n\r\n # HTML headers\r\n h.write('<table>')\r\n h.write('<tr>')\r\n h.write('<th>Usage Type</th>')\r\n h.write('<th>Last Time Active</th>')\r\n h.write('<th>Time Active in Msecs</th>')\r\n h.write('<th>Time Active in Secs</th>')\r\n h.write('<th>Last Time Service Used</th>')\r\n h.write('<th>Last Time Visible</th>')\r\n h.write('<th>Total Time Visible</th>')\r\n h.write('<th>App Launch Count</th>')\r\n h.write('<th>Package</th>')\r\n h.write('<th>Types</th>')\r\n h.write('<th>Class</th>')\r\n h.write('<th>Source</th>')\r\n h.write('</tr>')\r\n\r\n for row in all_rows:\r\n usage_type = row[0]\r\n lasttimeactive = row[1]\r\n time_Active_in_msecs = row[2]\r\n timeactive_in_secs = row[3]\r\n last_time_service_used = row[4]\r\n last_time_visible = row[5]\r\n total_time_visible = row[6]\r\n app_launch_count = row[7]\r\n package = row[8]\r\n types = row[9]\r\n classs = row[10]\r\n source = row[11]\r\n\r\n processed = processed + 1\r\n # report data\r\n h.write('<tr>')\r\n h.write('<td>' + str(usage_type) + '</td>')\r\n h.write('<td>' + str(lasttimeactive) + '</td>')\r\n h.write('<td>' + str(time_Active_in_msecs) + '</td>')\r\n h.write('<td>' + str(timeactive_in_secs) + '</td>')\r\n h.write('<td>' + str(last_time_service_used) + '</td>')\r\n h.write('<td>' + str(last_time_visible) + '</td>')\r\n h.write('<td>' + str(total_time_visible) + '</td>')\r\n h.write('<td>' + str(app_launch_count) + '</td>')\r\n h.write('<td>' + str(package) + '</td>')\r\n h.write('<td>' + str(types) + '</td>')\r\n h.write('<td>' + str(classs) + '</td>')\r\n h.write('<td>' + str(source) + '</td>')\r\n h.write('</tr>')\r\n\r\n # HTML footer\r\n h.write('<table>')\r\n h.write('<br />')\r\n\r\n print('')\r\n print('Records processed: ' + str(processed))\r\n print('Triage report completed. 
See Reports.html.')", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def add_statistics_to_status(status):\n return [{\n 'host': h['host'],\n 'status': h['status'],\n 'stats': get_statistics_for_host(h['host']),\n } for h in status]", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def all_status():\n print(\"Getting repo status.\\n\\nYou may be prompted for credentials...\")\n\n os.chdir(STATUS_DIR)\n attention = \"\"\n messages = []\n TIME_STAMP = datetime.now().strftime(\"%a_%d_%b_%Y_%H_%M_%S_%p\")\n\n fname = \"REPO_STATUS_@_{}.md\".format(TIME_STAMP)\n with open(fname, 'w+') as f:\n f.write(\"# Repository status as at {}\\n\\n\".format(TIME_STAMP))\n \n for each in load_multiple(_all=True):\n name = each.name\n status = each.status()\n\n messages.append(\"## {}\\n\\n```cmd\\n{}```\\n\".format(name, status))\n\n if need_attention(status):\n attention += \"1. {}\\n\".format(name)\n\n f.write(\"## REPOS NEEDING ATTENTION\\n\\n\")\n f.write(attention)\n f.write(\"\\n-------\\n\\n\")\n f.write(\"## STATUS MESSAGES\\n\\n\")\n f.write(\"\\n\".join(messages))\n\n print(\"\\n\\nDone. Status file saved in \", STATUS_DIR)\n os.chdir(BASE_DIR)\n return", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. 
Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def stats(self):", "def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))", "def report(self):\n\n job_summary = {}\n for job in self._jobs:\n \n if job.step_name not in job_summary:\n job_summary[ job.step_name ] = {}\n job_summary[ job.step_name ][ 'DONE' ] = 0\n job_summary[ job.step_name ][ 'RUNNING' ] = 0\n job_summary[ job.step_name ][ 'QUEUING' ] = 0\n job_summary[ job.step_name ][ 'FAILED' ] = 0\n job_summary[ job.step_name ][ 'UNKNOWN' ] = 0\n job_summary[ job.step_name ][ 'max_mem' ] = 0\n job_summary[ job.step_name ][ 'cputime' ] = 0\n\n if job.status == Job_status.FINISHED:\n job_summary[ job.step_name ][ 'DONE' ] += 1\n if job.cputime is not None:\n job_summary[ job.step_name ]['cputime'] += int(job.cputime)\n\n if job.max_memory is not None and job.max_memory > job_summary[ job.step_name ][ 'max_mem']:\n job_summary[ job.step_name ][ 'max_mem'] = int(job.max_memory)\n\n elif job.status == Job_status.RUNNING:\n job_summary[ job.step_name ][ 'RUNNING' ] += 1\n elif job.status == Job_status.QUEUEING or job.status == Job_status.SUBMITTED:\n job_summary[ job.step_name ][ 'QUEUING' ] += 1\n elif job.status == Job_status.FAILED or job.status == Job_status.NO_RESTART:\n job_summary[ job.step_name ][ 'FAILED' ] += 1\n else:\n job_summary[ job.step_name ][ 'UNKNOWN' ] += 1\n\n\n\n local_time = strftime(\"%d/%m/%Y %H:%M\", time.localtime())\n \n\n pickle_file = \"{}.{}\".format(self.pipeline.project_name, self.pipeline._pid)\n\n print(\"[{} @{} {}]\".format( local_time,self.pipeline._hostname , pickle_file))\n\n print(\"{:20} || {:12} || {:12} || {:2s} {:2s} {:2s} {:2s} {:2s}\".format(\"Run stats\", \"Runtime\", \"Max Mem\", \"D\",\"R\",\"Q\",\"F\",\"U\"))\n\n for step in sorted(self.pipeline._workflow._analysis_order, key=self.pipeline._workflow._analysis_order.__getitem__):\n if step not in job_summary:\n continue\n\n print(\"{:20} || {:12} || {:12} || {:02d}/{:02d}/{:02d}/{:02d}/{:02d}\".format(step, \n self.format_time(job_summary[ step ]['cputime']),\n self.format_memory(job_summary[ step ]['max_mem']),\n job_summary[ step ][ 'DONE' ],\n job_summary[ step ][ 'RUNNING' ],\n job_summary[ step ][ 'QUEUING' ],\n job_summary[ step ][ 'FAILED' ],\n job_summary[ step ][ 'UNKNOWN' ]))", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with 
open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def read_data(self, command):\n commands = []\n results = {}\n if self.data is None:\n results[\"mean\"] = \"n/a\"\n results[\"max\"] = \"n/a\"\n results[\"min\"] = \"n/a\"\n results[\"total\"] = \"n/a\"\n return results\n\n for log in self.data:\n # If \"status\" is present, a javascript title was sent\n if \"status\" in log:\n pass\n else:\n curr_command = log[command]\n if curr_command is not None:\n commands.append(curr_command)\n if commands: # Check if there's actual commands to process\n results[\"mean\"] = Job.mean(commands)\n results[\"max\"] = max(commands)\n results[\"min\"] = min(commands)\n results[\"total\"] = Job.total(commands)\n\n return results", "def main() -> None:\n jobStatus = list()\n adt = AuditManager(\n config.get(\"Audit\").get(\"database\"),\n config.get(\"Audit\").get(\"user\"),\n config.get(\"Audit\").get(\"password\"),\n config.get(\"Audit\").get(\"host\"),\n config.get(\"Audit\").get(\"port\"),\n )\n jobMeta = adt.getStepLogData()\n adt.closeConnection()\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=config.get(\"spark\").get(\"parallelJobs\", 2)\n ) as executor:\n spark_jobs = {\n executor.submit(processFile, fileMeta): fileMeta for fileMeta in jobMeta\n }\n for status in concurrent.futures.as_completed(spark_jobs):\n fileStatus = status.result()\n jobStatus.append(fileStatus)\n logger.info(jobStatus)", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def Analyze(self):\n \n self._analyzeLogs()\n for user in self._start_times:\n self._result[user] = self._zipTimes(user)", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n 
self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def run(self):\n\n syn_cookies = Setup.syn_cookies\n cpu_orange_threshold = Setup.parse_options()['cpu_orange_threshold']\n cpu_red_threshold = Setup.parse_options()['cpu_red_threshold']\n network_orange_threshold = Setup.parse_options()['network_orange_threshold']\n network_red_threshold = Setup.parse_options()['network_red_threshold']\n ram_orange_threshold = Setup.parse_options()['network_orange_threshold']\n ram_red_threshold = Setup.parse_options()['ram_orange_threshold']\n interval = Setup.parse_options()['interval']\n time_period = Setup.parse_options()['time_period']\n\n # Check which resources should be monitored\n if cpu_orange_threshold > 0:\n resource = \"cpu\"\n print(\"CPU is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (cpu_orange_threshold, cpu_red_threshold))\n resource_orange_threshold = float(cpu_orange_threshold)\n resource_red_threshold = float(cpu_red_threshold)\n elif network_orange_threshold > 0:\n resource = \"network\"\n print(\"Network usage is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (network_orange_threshold, network_red_threshold))\n resource_orange_threshold = float(network_orange_threshold)\n resource_red_threshold = float(network_red_threshold)\n elif ram_orange_threshold > 0:\n resource = \"memory\"\n print(\"Memory is being monitored, orange threshold set at %0.2f , red threshold set to %0.2f\"\n % (ram_orange_threshold, ram_red_threshold))\n resource_orange_threshold = float(ram_orange_threshold)\n resource_red_threshold = float(ram_red_threshold)\n else:\n resource = \"cpu\"\n resource_orange_threshold = float(self.calculate_thresholds()['orange_cpu_threshold'])\n resource_red_threshold = float(self.calculate_thresholds()['red_cpu_threshold'])\n print(\"CPU is being monitored, orange threshold set at %0.2f, red threshold set to %0.2f\"\n % (resource_orange_threshold, resource_red_threshold))\n stats = self.get_system_load(interval, time_period, resource)\n print(\"System monitor engaged\")\n while True:\n system_load = 100 * float(get_mean(stats))\n print \"System load is %0.2f\" % system_load\n # If system load below orange threshold change status to green\n if system_load < resource_orange_threshold and Setup.system_status != 'green':\n Setup.system_status = 'green'\n print(\"ALERT: System status green\")\n # If system load exceeds orange threshold change status to orange\n elif system_load >= resource_orange_threshold \\\n and system_load < resource_red_threshold and Setup.system_status != 'orange':\n Setup.system_status = 'orange'\n print(\"ALERT: System status updated to orange\")\n if syn_cookies == 0:\n print(\"Turning on SYN Cookies\")\n self.turn_on_syn_cookies()\n syn_cookies = 1\n # If system load exceeds red threshold change system status to red\n elif system_load > resource_red_threshold and Setup.system_status != 'red':\n Setup.system_status = 'red'\n print(\"WARNING: System status updated to Red\")\n else:\n print(\"No conditions met\")\n print(\"Status: %s, System_load: %0.2f, Orange_threshold: %0.2f, Red_threshold: %0.2f\" %\n (Setup.system_status, system_load, resource_orange_threshold, resource_red_threshold))\n stats = self.update_system_load(interval, stats, resource)", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = 
thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def process_file_metrics(root_dir, in_file_names, file_processors):\n manager = mp.Manager()\n file_metrics = manager.dict()\n\n parameters = [(root_dir, key, file_metrics, file_processors) for key in in_file_names]\n\n # main loop\n p = mp.Pool(max(1, mp.cpu_count() - 1))\n p.starmap(_process_file_metrics_parallel, parameters)\n p.close()\n p.join()\n\n return file_metrics", "def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict", "async def _read_status(self, job_id, job_paths):\n status_path = job_paths['status.json']\n\n try:\n async with aiofiles.open(status_path) as file:\n status_json = await file.read()\n\n status = json.loads(status_json)\n\n # Validate common value types\n assert isinstance(status, dict)\n\n progress = status.get('progress', 0.)\n assert 0. <= progress <= 1.\n\n assert isinstance(status.get('message', ''), str)\n\n await self.queue.set_status(job_id, status)\n except concurrent.futures.CancelledError:\n # Ignore (likely normal exit through task cancellation)\n raise\n except Exception: # pylint: disable=broad-except\n self.logger.exception(f'Exception while reading status of job {job_id}')", "def get_status_data():\n\n return dict(\n celery=_get_celery_status(),\n users=_get_users_stats(),\n daemons=_get_daemons_status(),\n settings=_get_settings(),\n files=_get_files_status(),\n languages=_get_languages_status(),\n sets=_get_sets_status(),\n )", "async def status(self, ctx: Context):\n # Get lines of code\n lines_of_code = os.popen(\n r'find . 
-path ./.venv -prune -false -o -name \"*.py\" -exec cat {} \\; | wc -l').read()\n\n # Get memory usage\n process = psutil.Process(os.getpid())\n memory_usage = process.memory_info().rss / 1024 ** 2\n\n await ctx.send(\n embed=discord.Embed(\n title=f'{self.bot.user.name} Status',\n colour=self.bot.user.colour\n ).set_thumbnail(\n url=self.bot.user.avatar_url\n ).add_field(\n name='Users:', value=len(self.bot.users)\n ).add_field(\n name='Guilds:', value=len(self.bot.guilds)\n ).add_field(\n name='Started at:', value=format_dt(self.bot._start_time)\n ).add_field(\n name='Memory usage:', value=f'{memory_usage:.2f} MB'\n ).add_field(\n name='Cogs loaded:', value=len(self.bot.cogs)\n ).add_field(\n name='Lines of code:', value=lines_of_code or 'Unknown'\n ).add_field(\n name='Quick links:',\n value='[Source Code](https://github.com/bijij/Silvally)',\n inline=False\n )\n )", "def status_counts(self, status_counts):\n\n self._status_counts = status_counts", "def metrics(self):\n return self.verificationRun.metrics()", "def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return", "def status():\n used = get_space_used()\n avail = get_space_available()\n allowed = config.download.space_to_use\n print \"Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)\" % \\\n (used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)\n print \"Space available on file system: %.2f GB\" % (avail/1024.0**3)\n\n numwait = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='waiting'\", \\\n fetchone=True)\n numfail = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of requests waiting: %d\" % numwait\n print \"Number of failed requests: %d\" % numfail\n\n numdlactive = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='downloading'\", \\\n fetchone=True)\n numdlfail = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of active downloads: %d\" % numdlactive\n print \"Number of failed downloads: %d\" % numdlfail", "def 
send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def _log_file_processing_stats(self, known_file_paths):\n # File Path: Path to the file containing the DAG definition\n # PID: PID associated with the process that's processing the file. May\n # be empty.\n # Runtime: If the process is currently running, how long it's been\n # running for in seconds.\n # Last Runtime: If the process ran before, how long did it take to\n # finish in seconds\n # Last Run: When the file finished processing in the previous run.\n headers = [\"File Path\", \"PID\", \"Runtime\", \"# DAGs\", \"# Errors\", \"Last Runtime\", \"Last Run\"]\n\n rows = []\n now = timezone.utcnow()\n for file_path in known_file_paths:\n last_runtime = self.get_last_runtime(file_path)\n num_dags = self.get_last_dag_count(file_path)\n num_errors = self.get_last_error_count(file_path)\n file_name = os.path.basename(file_path)\n file_name = os.path.splitext(file_name)[0].replace(os.sep, \".\")\n\n processor_pid = self.get_pid(file_path)\n processor_start_time = self.get_start_time(file_path)\n runtime = (now - processor_start_time) if processor_start_time else None\n last_run = self.get_last_finish_time(file_path)\n if last_run:\n seconds_ago = (now - last_run).total_seconds()\n Stats.gauge(f\"dag_processing.last_run.seconds_ago.{file_name}\", seconds_ago)\n\n rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))\n\n # Sort by longest last runtime. (Can't sort None values in python3)\n rows.sort(key=lambda x: x[3] or 0.0)\n\n formatted_rows = []\n for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:\n formatted_rows.append(\n (\n file_path,\n pid,\n f\"{runtime.total_seconds():.2f}s\" if runtime else None,\n num_dags,\n num_errors,\n f\"{last_runtime:.2f}s\" if last_runtime else None,\n last_run.strftime(\"%Y-%m-%dT%H:%M:%S\") if last_run else None,\n )\n )\n log_str = (\n \"\\n\"\n + \"=\" * 80\n + \"\\n\"\n + \"DAG File Processing Stats\\n\\n\"\n + tabulate(formatted_rows, headers=headers)\n + \"\\n\"\n + \"=\" * 80\n )\n\n self.log.info(log_str)", "def und_generate_metrics(udb_file):\n log.info(f\"Running Analysis for commit: {udb_file} ...\")\n # stdout=subprocess.DEVNULL makes silent the stdout ,\n subprocess.call(f\"und analyze -db {udb_file}\", stdout=subprocess.DEVNULL)\n log.info(\"Calculating metrics and creating csv\")\n subprocess.call(f\"und metrics {udb_file}\")", "def get_updated_stats(request):\n\t# read current values in chart\n\tlog_file = open('./load_charts/static/load_charts/uptime_log', 'r')\n\tuptime_values = ast.literal_eval(log_file.read())\n\tlog_file.close()\n\t#defines current oldest entry\n\toldest_record = datetime.datetime.strptime(uptime_values[-1][\"date\"], '%Y-%m-%d %H:%M:%S')\n\t#defines a two minutes interval from now\n\ttime_interval = datetime.datetime.utcnow() - timedelta(minutes=2)\n\n\t# creates a dict with stats from uptime statistics (uptime, users ...)\n\tdata = parse_uptime()\n\t# default alert is -1 nothing is triggered\n\tdata[\"alert\"] = -1\n\n\t# we need at least a two minutes history\n\tif time_interval > oldest_record:\n\t\tthreshold = float(request.GET.get('threshold', ''))\n\t\tsum_avg = 0\n\t\tnb = 0\n\t\t# compute avg load on past two minutes\n\t\tfor pos, val in 
enumerate(uptime_values):\n\t\t\tval_date = datetime.datetime.strptime(val[\"date\"], '%Y-%m-%d %H:%M:%S')\n\t\t\tif val_date > time_interval:\n\t\t\t\tsum_avg += val[\"value\"]\n\t\t\t\tnb += 1\n\t\t# Case avg > threshold set alert to 1 else set alert to 0\n\t\tif nb:\n\t\t\tavg = sum_avg / nb\n\t\t\tdata[\"value\"] = avg\n\t\t\tif avg > threshold:\n\t\t\t\tdata[\"alert\"] = 1\n\t\t\telif avg < threshold:\n\t\t\t\tdata[\"alert\"] = 0\n\n\treturn HttpResponse(json.dumps(data))", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + \"\\n\" )\n # Endtry", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "async def monitor_downloads(self):\n downloads = await self.request_manager.get_downloads()\n for download in downloads[\"downloads\"]:\n time_diff = time.time() - self.start_time\n with open(self.download_stats_file_path, \"a\") as output_file:\n output_file.write(\"%s,%s,%s,%s,%s,%f\\n\" % (time_diff,\n download[\"infohash\"],\n download[\"status\"],\n download[\"speed_up\"],\n download[\"speed_down\"],\n 
download[\"progress\"]))\n\n # Now we get the number of circuits\n circuits_info = await self.request_manager.get_circuits_info()\n time_diff = time.time() - self.start_time\n circuits_ready = circuits_extending = circuits_closing = 0\n circuits_data = circuits_ip = circuits_rp = circuits_rendezvous = 0\n\n for circuit in circuits_info[\"circuits\"]:\n if circuit[\"state\"] == \"READY\":\n circuits_ready += 1\n elif circuit[\"state\"] == \"EXTENDING\":\n circuits_extending += 1\n elif circuit[\"state\"] == \"CLOSING\":\n circuits_closing += 1\n\n if circuit[\"type\"] == \"DATA\":\n circuits_data += 1\n elif circuit[\"type\"] == \"IP\":\n circuits_ip += 1\n elif circuit[\"type\"] == \"RP\":\n circuits_rp += 1\n elif circuit[\"type\"] == \"RENDEZVOUS\":\n circuits_rendezvous += 1\n\n with open(self.circuits_file_path, \"a\") as output_file:\n output_file.write(\"%s,%s,%s,%s,%d,%d,%d,%d\\n\" % (time_diff,\n circuit[\"circuit_id\"],\n circuit[\"type\"],\n circuit[\"state\"],\n circuit[\"goal_hops\"],\n circuit[\"actual_hops\"],\n circuit[\"bytes_up\"],\n circuit[\"bytes_down\"]))\n\n with open(self.circuits_states_file_path, \"a\") as output_file:\n output_file.write(\"%s,%d,%d,%d\\n\" % (time_diff,\n circuits_ready,\n circuits_extending,\n circuits_closing))\n\n with open(self.circuits_types_file_path, \"a\") as output_file:\n output_file.write(\"%s,%d,%d,%d,%d\\n\" % (time_diff,\n circuits_data,\n circuits_ip,\n circuits_rp,\n circuits_rendezvous))", "def set_metrics(self):", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n 
sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0", "def metrics_group():", "def load_status_table():", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def getstatus(self):\n status = dict(state=self.getstate(), runningcmd=None,\n current_exposure=self.current_exposure,\n max_exposures=self.max_exposures,\n statustime=str(datetime.now())[:-7],\n lastfile=self.lastfile)\n if self.process:\n status['lastcmd'] = self.process.args[0]\n status['lastreturn'] = self.process.poll()\n if status['state'] == 'running':\n status['runningcmd'] = path.basename(self.process.args[0])\n try:\n with open(self.logfilename, newline='') as logfile:\n ts = datetime.fromtimestamp(path.getmtime(self.logfilename))\n status['cmdoutput'] = f\"Last output: {str(ts)[:-7]}\\n\"\n status['cmdoutput'] += '#'*80+'\\n'\n lines = logfile.readlines()\n if lines and lines[-1][-1] == '\\r':\n lines[-1] = lines[-1][:-1]\n for line in lines:\n if not line.endswith('\\r'):\n status['cmdoutput'] += line\n except FileNotFoundError:\n status['cmdoutput'] = \"\"\n \n # info for the lastimg to update\n status['lastimg'] = self.lastimgpath\n try:\n status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)\n except FileNotFoundError:\n status['lastimg_timestamp'] = 0\n return status" ]
[ "0.6695974", "0.6339612", "0.6311073", "0.62527573", "0.6087654", "0.60828906", "0.60479605", "0.5964161", "0.5951921", "0.5916135", "0.5879056", "0.5801343", "0.5773241", "0.57671124", "0.57634276", "0.5756819", "0.5740762", "0.5738426", "0.5729482", "0.57033396", "0.56920266", "0.5655607", "0.5635406", "0.56338316", "0.5604341", "0.5591151", "0.55907583", "0.5567674", "0.55610144", "0.55295235", "0.55270654", "0.55194944", "0.5511929", "0.54956365", "0.54944605", "0.5480354", "0.54759806", "0.5470992", "0.5463171", "0.5460351", "0.5457137", "0.5456441", "0.544719", "0.54438084", "0.5442435", "0.5436841", "0.5430587", "0.54289156", "0.5427977", "0.54248935", "0.54228747", "0.5410932", "0.53982264", "0.53940725", "0.53832954", "0.53761405", "0.5364392", "0.5361971", "0.53551555", "0.5352927", "0.5341627", "0.53212905", "0.5317639", "0.531173", "0.5311527", "0.529278", "0.52902025", "0.52878404", "0.5284806", "0.5283746", "0.5282566", "0.526157", "0.52599096", "0.5258446", "0.52583575", "0.524512", "0.52384436", "0.52375", "0.5233103", "0.52307117", "0.5225916", "0.5224368", "0.52197033", "0.52165496", "0.52117556", "0.5207412", "0.5202959", "0.5202468", "0.51954937", "0.5173155", "0.5162787", "0.51615566", "0.51609975", "0.5158265", "0.5145286", "0.514103", "0.5127577", "0.51269686", "0.51254284", "0.5123322" ]
0.5690548
21
Gathers the metrics from the io file.
def gather_sample(self, stat_file, collector=None): if not collector: collector = {} # File format is single value per line with "fieldname:" prefix. for x in stat_file: fields = x.split() if len(fields) == 0: continue if not collector: collector = {} if fields[0] == "rchar:": collector.update({Metric("app.disk.bytes", "read"): int(fields[1])}) elif fields[0] == "syscr:": collector.update({Metric("app.disk.requests", "read"): int(fields[1])}) elif fields[0] == "wchar:": collector.update({Metric("app.disk.bytes", "write"): int(fields[1])}) elif fields[0] == "syscw:": collector.update({Metric("app.disk.requests", "write"): int(fields[1])}) return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def read_metrics(self):\n raise NotImplementedError()", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()[\"ProtoDefinition\"].Payload()\n read_metric.ParseFromString(data.read())\n\n # One record for the whole file\n self.payload_metadata = read_metric.payloadMetadata\n self.device = read_metric.device\n\n # Get list of all *repeated* field types\n field_names = []\n for field_desc in read_metric.DESCRIPTOR.fields:\n field_name = field_desc.name\n\n if field_desc.label == field_desc.LABEL_REPEATED:\n field_names.append(field_name)\n\n # For each repeated field type, get the data and yield one item at a time\n for field_name in field_names:\n stream_samples = getattr(read_metric, field_name)\n for sample in stream_samples:\n yield self.device, sample", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n 
binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: 
{:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "async def write_metrics(every: int, to: str):\n while True:\n line = f\"pyvast-threatbus,host={socket.gethostname()} \"\n start_length = len(line)\n for m in metrics:\n if not m.is_set:\n continue\n if type(m) is Gauge or type(m) is InfiniteGauge:\n if len(line) > start_length:\n line += \",\"\n line += f\"{m.name}={m.value}\"\n if type(m) is Summary:\n if len(line) > start_length:\n line += \",\"\n line += (\n f\"{m.name}_min={m.min},{m.name}_max={m.max},{m.name}_avg={m.avg}\"\n )\n m.reset()\n\n if len(line) > start_length:\n # only update the file if there were metrics collected.\n line += f\" {time.time_ns()}\" # append current nanoseconds ts\n with open(to, \"a\") as f:\n f.write(line + \"\\n\")\n await asyncio.sleep(every)", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. 
Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def compute_metrics(self):\n pass", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n 
shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def gather_sample(self, my_file, collector=None):\n\n pass", "def get_run_metrics_handle(run_dir):\n #print(\"Examining: {}\".format(run_dir))\n\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n for v2l in (py_interop_run.Tile, py_interop_run.ExtendedTile):\n valid_to_load[v2l] = 1\n\n run_metrics = py_interop_run_metrics.run_metrics()\n run_metrics.read(run_dir, valid_to_load)\n\n return run_metrics", "def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def parse(self):\n\n # if data has already been parsed, do nothing\n if self._data:\n return\n\n stats = {\n \"genres\": {},\n \"artists\": {},\n \"global_stats\": {\n \"songs\": 0,\n \"lines\": 0,\n \"words\": 0\n }\n }\n try:\n with open(self._filename) as file:\n objects = ijson.items(file, \"item\")\n\n # compute metrics\n for object in 
objects:\n\n lines = len(object[\"lyrics\"])\n words = sum([len(line.split()) for line in object[\"lyrics\"]])\n\n genre = object[\"genre\"]\n stats[\"genres\"][genre] = stats[\"genres\"].get(genre, {\"artists\": {}})\n genre_obj = stats[\"genres\"][genre]\n genre_obj[\"songs\"] = genre_obj.get(\"songs\", 0) + 1\n genre_obj[\"lines\"] = genre_obj.get(\"lines\", 0) + lines\n genre_obj[\"words\"] = genre_obj.get(\"words\", 0) + words\n genre_obj[\"is_music\"] = genre_obj.get(\"is_music\", 0)\n if object[\"is_music\"] != \"false\":\n genre_obj[\"is_music\"] += 1\n\n artist = object[\"artist\"]\n stats[\"artists\"][artist] = stats[\"artists\"].get(artist, 0) + 1\n stats[\"genres\"][genre][\"artists\"][artist] = stats[\"genres\"][genre][\"artists\"].get(artist, 0) + 1\n\n # update global stats\n stats[\"global_stats\"][\"songs\"] += 1\n stats[\"global_stats\"][\"lines\"] += lines\n stats[\"global_stats\"][\"words\"] += words\n\n # calculate averages for each genre\n for genre, genre_stats in stats[\"genres\"].items():\n genre_stats[\"avg_line_length\"] = genre_stats[\"words\"] / genre_stats[\"lines\"]\n genre_stats[\"avg_lines\"] = genre_stats[\"lines\"] / genre_stats[\"songs\"]\n genre_stats[\"avg_words\"] = genre_stats[\"words\"] / genre_stats[\"songs\"]\n\n # calculate global averages\n stats[\"global_stats\"][\"avg_line_length\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"lines\"]\n stats[\"global_stats\"][\"avg_lines\"] = stats[\"global_stats\"][\"lines\"] / stats[\"global_stats\"][\"songs\"]\n stats[\"global_stats\"][\"avg_words\"] = stats[\"global_stats\"][\"words\"] / stats[\"global_stats\"][\"songs\"]\n\n self._data = stats\n\n except IOError as e:\n print(\"Exception occurred: \", e)", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def read(self):\n self.record_d = {}\n if self.__read_file():\n self.__print_report()", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def process_file_metrics(root_dir, in_file_names, file_processors):\n manager = mp.Manager()\n file_metrics = manager.dict()\n\n parameters = [(root_dir, key, 
file_metrics, file_processors) for key in in_file_names]\n\n # main loop\n p = mp.Pool(max(1, mp.cpu_count() - 1))\n p.starmap(_process_file_metrics_parallel, parameters)\n p.close()\n p.join()\n\n return file_metrics", "def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def rule_metrics(path):\n logging.info(\n \"Searching path `{}` for YAML rule definitions for metrics ...\".format(path)\n )\n set_logger()\n metrics_rules(path)", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def readOpsimData(self):\n if self.opsim_data:\n good, string = self.checkOpsimData()\n if good:\n self.opsim_visits = self.opsim_data\n else:\n raise Exception(string)\n elif self.opsim_filename:\n dataDir = os.getenv('LSST_POINTING_DIR')\n if not dataDir:\n raise Exception('LSST_POINTING_DIR env not set')\n opsimfile = os.path.join(dataDir, self.opsim_filename)\n # Read the file, store the info.\n with open(opsimfile, 'r') as opsim:\n for visit in opsim:\n if visit.startswith('o'):\n print(visit)\n continue\n data = visit.strip().split()\n visitDict = {}\n visitDict[_opsim_keys[0]] = 
int(data[0])\n for i in range(1, len(data)):\n visitDict[_opsim_keys[i]] = float(data[i])\n self.opsim_visits.append(visitDict)\n else:\n raise Exception('No data specified')", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def parse_metrics_file(self) -> Dict[int, dict]:\n LOG.info(\"Parsing Dragen demultiplexing adapter metrics file %s\", self.adapter_metrics_path)\n parsed_metrics = {}\n\n with self.adapter_metrics_path.open(\"r\") as metrics_file:\n metrics_reader = csv.DictReader(metrics_file)\n for row in metrics_reader:\n lane = int(row[\"Lane\"])\n read_number = row[\"ReadNumber\"]\n sample_id = row[\"Sample_ID\"]\n parsed_metrics[lane] = parsed_metrics.get(lane, {})\n parsed_metrics[lane][(read_number, sample_id)] = row\n\n return self.summerize_adapter_metrics(parsed_metrics=parsed_metrics)", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def und_generate_metrics(udb_file):\n log.info(f\"Running Analysis for commit: {udb_file} ...\")\n # stdout=subprocess.DEVNULL makes silent the stdout ,\n subprocess.call(f\"und analyze -db {udb_file}\", stdout=subprocess.DEVNULL)\n log.info(\"Calculating metrics and creating csv\")\n subprocess.call(f\"und metrics {udb_file}\")", "def __init__(self, file_name: str):\n self.case_metrics = []\n self.cluster_metrics = []\n self.file_name = file_name\n\n self.path_to_pmg_metrics = f'metrics/{file_name}_process_model_graphs'\n self.path_to_pmg_vis = f'visualization/{file_name}_process_model_graphs'\n self.path_to_drifts = 'visualization/drifts'\n self.path_to_case_metrics = 'metrics/case_metrics'\n self.path_to_cluster_metrics = 'metrics/cluster_metrics'\n try:\n makedirs(self.path_to_pmg_metrics, exist_ok=True)\n makedirs(self.path_to_pmg_vis, exist_ok=True)\n makedirs(self.path_to_drifts, exist_ok=True)\n makedirs(self.path_to_case_metrics, exist_ok=True)\n makedirs(self.path_to_cluster_metrics, exist_ok=True)\n\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'case',\n 'graph distance', 'time distance', 'label']) \\\n .to_csv(f'{self.path_to_case_metrics}/{file_name}.csv', index=False)\n pd.DataFrame(columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']) \\\n .to_csv(f'{self.path_to_cluster_metrics}/{file_name}.csv', index=False)\n except Exception as e:\n print(e)", "def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()", "def __init__(self, pid, monitor_id, logger, file_pattern):\n self._pid = pid\n self._id = monitor_id\n self._file_pattern = file_pattern\n # The file object to be read. 
We always keep this open and just seek to zero when we need to\n # re-read it. Some of the /proc files do better with this approach.\n self._file = None\n # The time we last collected the metrics.\n self._timestamp = None\n # True if the reader failed for some unrecoverable error.\n self._failed = False\n self._logger = logger\n self._metric_printer = MetricPrinter(logger, monitor_id)", "def read_cpu_stats(target_file):\n test_line = target_file.readline()\n if \"CPU\" in test_line:\n logical_processors = target_file.readline().strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n if \"logical processors\" in test_line:\n logical_processors = test_line.strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n return CpuStats('', '', '')", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def set_metrics(self):", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': 
rw['writes']})\n \n return data", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def __init__(self, filepath):\n\n self.filepath = Path(filepath)\n\n # Store log data in line based format\n self.values = None\n self._read_log()\n\n # Store log data in row based format\n self.counters = []\n self.timestamps = []\n self.acceleration = []\n self._store_rows()", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def metrics_group():", "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. 
count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def read_file(self, file_name, bases=[], tasks=[]):\n out_bases = OrderedDict()\n out_tasks = OrderedDict()\n with h5py.File(file_name, 'r') as f:\n for b in bases:\n out_bases[b] = f['scales/{:s}/1.0'.format(b)][()]\n out_write_num = f['scales']['write_number'][()]\n out_sim_time = f['scales']['sim_time'][()]\n for t in tasks:\n out_tasks[t] = f['tasks'][t][()]\n return out_bases, out_tasks, out_write_num, out_sim_time", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def _read_metrics(instream):\n format, base = _read_format(instream)\n if format & PCF_COMPRESSED_METRICS:\n compressed_metrics = base.Struct(**_COMPRESSED_METRICS)\n # documented as signed int, but unsigned it makes more sense\n # also this is used as uint by bdftopcf for e.g. unifont\n count = base.uint16.read_from(instream)\n metrics = (compressed_metrics * count).read_from(instream)\n # adjust unsigned bytes by 0x80 offset\n metrics = tuple(\n Props(**{_k: _v-0x80 for _k, _v in vars(_m).items()})\n for _m in metrics\n )\n else:\n uncompressed_metrics = base.Struct(**_UNCOMPRESSED_METRICS)\n count = base.uint32.read_from(instream)\n metrics = (uncompressed_metrics * count).read_from(instream)\n return metrics", "def info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,\n masked):\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n mode = 'r' if (verbose or meta_member == 'stats') else 'r-'\n\n try:\n with rasterio.drivers(CPL_DEBUG=(verbosity > 2)):\n with rasterio.open(input, mode) as src:\n info = src.profile\n info['transform'] = info['affine'][:6]\n del info['affine']\n info['shape'] = info['height'], info['width']\n info['bounds'] = src.bounds\n proj4 = rasterio.crs.to_string(src.crs)\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n info['crs'] = proj4\n info['res'] = src.res\n info['lnglat'] = src.lnglat()\n if verbose:\n stats = [{'min': float(b.min()),\n 'max': float(b.max()),\n 'mean': float(b.mean())\n } for b in src.read(masked=masked)]\n info['stats'] = stats\n info['checksum'] = [src.checksum(i) for i in src.indexes]\n if aspect == 'meta':\n if meta_member == 'stats':\n band = src.read(bidx, masked=masked)\n click.echo('%f %f %f' % (\n float(band.min()),\n float(band.max()),\n float(band.mean())))\n elif meta_member == 'checksum':\n click.echo(str(src.checksum(bidx)))\n elif meta_member:\n if isinstance(info[meta_member], (list, tuple)):\n click.echo(\" \".join(map(str, info[meta_member])))\n else:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, indent=indent))\n elif aspect == 'tags':\n click.echo(\n 
json.dumps(src.tags(ns=namespace), indent=indent))\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def run(self):\n\n self.load_file()\n self.cat_to_num()\n self.split()", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def metrics(self, request):\n return OtterMetrics(self.store).app.resource()", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in 
volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def main():\n from time import perf_counter\n\n hint = None\n if len(sys.argv) > 1:\n hint = sys.argv[1]\n logpath = _guess_log_path(hint)\n\n if logpath:\n print(\"Logpath:\", logpath)\n print(\"Getting values:\")\n\n with open(logpath, 'r') as ofl:\n headers = parse_log_headers(ofl.read(297))\n pre = perf_counter()\n values = get_values(ofl)\n post = perf_counter()\n\n print(\"Values:\")\n for i, v in enumerate(values):\n print(\"\\t\", headers[i][0], \": \", v)\n print(\"Read in {:0.9f} sec\".format(post - pre))\n\n else:\n print(\"Nope\")", "def list_metrics(self):\n pass", "def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table", "def test_io_statistics(self):\n import time\n from supvisors.statistics import instant_io_statistics, io_statistics\n # take 2 spaced instant cpu statistics\n ref_stats = instant_io_statistics()\n time.sleep(1)\n last_stats = instant_io_statistics()\n stats = io_statistics(last_stats, ref_stats, 1)\n # test keys\n self.assertListEqual(ref_stats.keys(), stats.keys())\n self.assertListEqual(last_stats.keys(), stats.keys())\n # test that values are pairs\n for intf, bytes in stats.items():\n self.assertEqual(2, len(bytes))\n for value in bytes:\n self.assertIs(int, type(value))", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def read_stats(filename):\n header = {}\n tableinfo = {}\n measures = []\n rowmeasures = []\n\n with open(filename, 'rt') as fp:\n lines = 
fp.readlines()\n for line in lines:\n if line == line[0]:\n continue\n #parse commented header\n if line.startswith('#'):\n fields = line.split()[1:]\n if len(fields) < 2:\n continue\n tag = fields[0]\n if tag == 'TableCol':\n col_idx = int(fields[1])\n if col_idx not in tableinfo:\n tableinfo[col_idx] = {}\n tableinfo[col_idx][fields[2]] = ' '.join(fields[3:])\n if tableinfo[col_idx][fields[2]] == \"StructName\":\n struct_idx = col_idx\n elif tag == \"Measure\":\n fields = ' '.join(fields).replace('CortexVol ', 'CortexVol, ').split()\n fields = ' '.join(fields[1:]).split(', ')\n measures.append({'structure': fields[0],\n 'name': fields[1],\n 'description': fields[2],\n 'value': fields[3],\n 'units': fields[4],\n 'source': 'Header'})\n elif tag == \"ColHeaders\":\n if len(fields) != len(tableinfo):\n for idx, fieldname in enumerate(fields[1:]):\n if idx + 1 in tableinfo:\n continue\n tableinfo[idx + 1] = {'ColHeader': fieldname,\n 'Units': 'unknown',\n 'FieldName': fieldname}\n else:\n continue\n else:\n header[tag] = ' '.join(fields[1:])\n else:\n #read values\n row = line.split()\n values = {}\n measures.append({'structure': row[struct_idx-1],\n 'items': [],\n 'source': 'Table'}),\n for idx, value in enumerate(row):\n if idx + 1 == struct_idx:\n continue\n measures[-1]['items'].append({\n 'name': tableinfo[idx + 1]['ColHeader'],\n 'description': tableinfo[idx + 1]['FieldName'],\n 'value': value,\n 'units': tableinfo[idx + 1]['Units'],\n })\n return header, tableinfo, measures", "def main():\n if len(sys.argv) != 2:\n print('Usage: {0} <config.yaml>'.format(sys.argv[0]))\n sys.exit()\n\n try:\n conf_file = open(sys.argv[1], 'r')\n conf = yaml.safe_load(conf_file)\n except Exception as e:\n print('Error loading config: {0}'.format(str(e)), file=sys.stderr)\n\n sensor_mappings = conf.get('sensor_mappings')\n prometheus_port = conf.get('exporter_port', 8104)\n method = conf.get('method')\n onewire_temperature_c = Gauge('onewire_temperature_c', 'Temperature in C', ['location'])\n\n # Start the prometheus HTTP server\n start_http_server(prometheus_port)\n\n if method == 'serial':\n read_serial(onewire_temperature_c, sensor_mappings, conf.get('serial_port'))\n elif method == 'w1':\n read_w1(onewire_temperature_c, sensor_mappings)\n else:\n print('Invalid method specified: {0}'.format(method), file=sys.stderr)\n sys.exit()", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--conf', default=\"/etc/sensors.json\",\n help=\"The local json config file for this system\")\n parser.add_argument('--verbose', action=\"store_true\",\n help=\"Turn on verbose output\")\n parser.add_argument('--interval', default=120,\n help=\"How many seconds to sleep between gathering data\")\n args = parser.parse_args()\n\n # Make sure the file exists, or create an example and exit\n if not os.path.exists(args.conf):\n print 'ERROR: No configuration exists\\n'\n sys.exit(1)\n\n while True:\n # Get UTC epoch time\n now = time.mktime(time.gmtime(time.time()))\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n\n with open(args.conf, \"r\") as fd:\n conf = json.loads(fd.read())\n\n # Gather the readings...\n for sensor in conf['sensors']:\n sensor['val'] = str(subprocess.check_output([\n 'owread', '-F', '-s', sensor['server'], sensor['path']])).strip()\n log.debug(\"Sensor %r current reading %r\", sensor['type'],\n sensor['val'])\n\n # Now update each servers queues\n for server in conf['servers']:\n with 
open(server['local_queue'], \"a\") as readings:\n for sensor in conf['sensors']:\n readings.write(\"%s,location=%s value=%f %ld\\n\" % (\n sensor['type'], sensor['location'],\n float(sensor['val']), int(now)))\n time.sleep(args.interval)", "def add_metrics(self, metrics):\n for i, metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def metrics(self, metrics):\n\n self._metrics = metrics", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def process_file(self):\n self._processing_logger.log_info('Start processing')\n self.parsing_start_time = datetime.datetime.now()\n if os.path.exists(self.tmp_stat_file_path) \\\n and not HcsParsingUtils.active_processing_exceed_timeout(self.tmp_stat_file_path):\n self._processing_logger.log_info('This file is processed by another parser, skipping...')\n return 2\n self.create_tmp_stat_file()\n hcs_index_file_path = self.hcs_root_dir + MEASUREMENT_INDEX_FILE_PATH\n time_series_details = self._extract_time_series_details(hcs_index_file_path)\n self.generate_ome_xml_info_file()\n xml_info_tree = ET.parse(self.ome_xml_info_file_path).getroot()\n plate_width, plate_height = self._get_plate_configuration(xml_info_tree)\n wells_tags = self.read_wells_tags()\n if wells_tags:\n self._processing_logger.log_info(\"Tags \" + str(wells_tags))\n if not TAGS_PROCESSING_ONLY and not EVAL_PROCESSING_ONLY:\n if not self._localize_related_files():\n self._processing_logger.log_info('Some errors occurred during copying files from the bucket, exiting...')\n return 1\n else:\n self._processing_logger.log_info('Localization is finished.')\n local_preview_dir = os.path.join(self.tmp_local_dir, 'preview')\n hcs_local_index_file_path = get_path_without_trailing_delimiter(self.tmp_local_dir) \\\n + MEASUREMENT_INDEX_FILE_PATH\n for sequence_id, timepoints in time_series_details.items():\n self._processing_logger.log_info('Processing sequence with id={}'.format(sequence_id))\n sequence_index_file_path = self.extract_sequence_data(sequence_id, hcs_local_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {}'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_index_file_path, local_preview_dir, sequence_id))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful...')\n return 1\n sequence_overview_index_file_path, wells_grid_mapping = self.build_sequence_overview_index(sequence_index_file_path)\n conversion_result = os.system('bash \"{}\" \"{}\" \"{}\" {} \"{}\"'.format(\n OME_TIFF_SEQUENCE_CREATION_SCRIPT, sequence_overview_index_file_path, local_preview_dir,\n sequence_id, 'overview_data.ome.tiff'))\n if conversion_result != 0:\n self._processing_logger.log_info('File processing was not successful: well preview generation failure')\n return 1\n self.write_dict_to_file(os.path.join(local_preview_dir, sequence_id, 'wells_map.json'),\n self.build_wells_map(sequence_id, wells_grid_mapping, wells_tags))\n if LOCALIZE_USE_PIPE == \"true\":\n cloud_transfer_result = os.system('pipe storage cp -f -r \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n else:\n cloud_transfer_result = os.system('aws s3 sync \"{}\" \"{}\"'\n .format(local_preview_dir,\n HcsParsingUtils.extract_cloud_path(self.hcs_img_service_dir)))\n if cloud_transfer_result != 0:\n 
self._processing_logger.log_info('Results transfer was not successful...')\n return 1\n self._write_hcs_file(time_series_details, plate_width, plate_height)\n if not EVAL_PROCESSING_ONLY:\n tags_processing_result = self.try_process_tags(xml_info_tree, wells_tags)\n if TAGS_PROCESSING_ONLY:\n if wells_tags:\n for sequence_id, timepoints in time_series_details.items():\n path = os.path.join(self.hcs_img_service_dir, sequence_id, 'wells_map.json')\n self.write_dict_to_file(path, self.update_wells_json(path, wells_tags))\n return tags_processing_result\n if not TAGS_PROCESSING_ONLY:\n eval_processing_result = self.try_process_eval()\n if EVAL_PROCESSING_ONLY:\n return eval_processing_result\n self.create_stat_file()\n return 0", "def store_metrics_to_params(self):\n\n model = self.model_name\n\n if self.stats_path.exists():\n with open(self.stats_path, \"rb\") as f:\n stats_dict = pickle.load(f)\n else:\n stats_dict = {}\n\n if model not in stats_dict:\n stats_dict[model] = defaultdict(list)\n\n stats_dict[model]['amine'].append(self.amine)\n stats_dict[model]['accuracies'].append(self.metrics['accuracies'])\n stats_dict[model]['confusion_matrices'].append(\n self.metrics['confusion_matrices'])\n stats_dict[model]['precisions'].append(self.metrics['precisions'])\n stats_dict[model]['recalls'].append(self.metrics['recalls'])\n stats_dict[model]['bcrs'].append(self.metrics['bcrs'])\n\n # Save this dictionary in case we need it later\n with open(self.stats_path, \"wb\") as f:\n pickle.dump(stats_dict, f)", "def collect_metrics(params, start, uuid=str(uuid4())):\n list_of_metrics = ['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])", "def _read_output_files(self):\n self.manage = {} # Empty the dictionary matching phrases\n self.manage['spin'] = (re.compile(' *net spin of'), self._read_spin)\n self.manage['nelect'] = (re.compile(' *number of electrons'), self._read_nelect)\n self.manage['cellcontents'] = (re.compile(' *Unit Cell'), self._read_cellcontents)\n self.manage['pspots'] = (re.compile(' *Files used for pseudopotentials:'), self._read_pspot)\n self.manage['masses'] = (re.compile(' *Mass of species in AMU'), self._read_masses)\n self.manage['kpoints'] = (re.compile(' 
*Number of kpoints used'), self._read_kpoints)\n self.manage['kpoint_grid'] = (re.compile(' *MP grid size for SCF'), self._read_kpoint_grid)\n self.manage['finalenergy'] = (re.compile(' *Final energy, E'), self._read_energies)\n self.manage['finalenergy2'] = (re.compile('Final energy ='), self._read_energies2)\n self.manage['finalenergy3'] = (re.compile('Dispersion corrected final energy'), self._read_energies3)\n self.manage['energy_cutoff'] = (re.compile(' *plane wave basis set cut'), self._read_energy_cutoff)\n self.manage['nbands'] = (re.compile(' *number of bands'), self._read_nbands)\n self.manage['pressure'] = (re.compile(' *\\* *Pressure: '), self._read_external_pressure)\n self.manage['opticalDielectric'] = (re.compile(' *Optical Permittivity'), self._read_dielectric)\n self.manage['bornCharges'] = (re.compile(' *Born Effective Charges'), self._read_born_charges)\n # For the .phonon file\n self.manage['frequency'] = (re.compile(' q-pt= 1 0.000000 0.000000 0.000000 1.0000000000 *$'), self._read_frequencies)\n self.manage['nbranches'] = (re.compile(' Number of branches'), self._read_nbranches)\n for f in self._outputfiles:\n self._read_output_file(f)\n return", "def _load(self):\n op_type_file_path = os.path.join(\n self._profiling_dir,\n self._csv_file_to_analyse.format(self._device_id)\n )\n op_type_file_path = validate_and_normalize_path(\n op_type_file_path, raise_key=\"Invalid op_type_file_path\")\n if not os.path.isfile(op_type_file_path):\n log.warning('The file <%s> does not exist.', op_type_file_path)\n return\n\n with open(op_type_file_path, 'r') as file:\n csv_reader = csv.reader(file)\n _ = next(csv_reader)\n for info in csv_reader:\n self._data.append(self._convert_field_type(info))", "def analyze(file,process):\n readin(file)\n # inspecting(file, functions)\n process(file, functions)", "def readFile(self, fname):\r\n self.scores = []\r\n self.fname = fname\r\n try:\r\n with open(fname, 'r') as f:\r\n for line in f:\r\n self.appendScore(line.split(' '))\r\n except:\r\n pass", "def process_data(file: TextIO) -> 'Climatematch':\n climate_dict = {}\n line = file.readline()\n \n while line != '':\n username = line.strip()\n climate_dict[username] = {}\n \n line = file.readline().strip()\n climate_dict[username]['name'] = line\n line = file.readline().strip()\n climate_dict[username]['location'] = line\n \n climate_dict[username]['bio'] = ''\n line = file.readline()\n while line != 'ENDBIO\\n': \n climate_dict[username]['bio'] += line\n line = file.readline()\n \n climate_dict[username]['skills'] = []\n line = file.readline().strip()\n while line != 'ENDSKILL': \n climate_dict[username]['skills'].append(line)\n line = file.readline().strip()\n \n climate_dict[username]['interest'] = []\n line = file.readline().strip() \n while line != 'END': \n climate_dict[username]['interest'].append(line)\n line = file.readline().strip()\n line = file.readline()\n \n return climate_dict", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. 
Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def write_training_metrics(self) -> None:\n self.trainer_metrics.write_training_metrics()", "def load_data(self, f): \n self.sampling = True\n self.reads = np.load(f)\n self.total = self.reads.shape[0]", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def test_alt_service_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"service_perfdata_file={}\\n\"\n \"service_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_SVC_TEMPLATE),\n service_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_SVC, \"r\") as f:\n nagios_perf = ensure_string(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.current_users.users',\n 'timestamp': 1339511440,\n 'value': 1.0,\n 'hostname': 'localhost',\n 'tags': ['warn:20', 'crit:50', 'min:0'],\n },\n {\n 'name': 'nagios.ping.pl',\n 'timestamp': 1339511500,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:20', 'crit:60', 'min:0'],\n },\n {\n 'name': 'nagios.ping.rta',\n 'timestamp': 1339511500,\n 'value': 0.065,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:100.000000', 'crit:500.000000', 'min:0.000000'],\n },\n {\n 'name': 'nagios.root_partition',\n 'timestamp': 1339511560,\n 'value': 2470.0,\n 'hostname': 'localhost',\n 'tags': ['unit:MB', 'warn:5852', 'crit:6583', 'min:0', 'max:7315', 'device:/'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()", "def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line", "def main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n counts = defaultdict(lambda: 0)\n total_sentences = 0\n for filename in 
sys.stdin:\n filename = filename.strip()\n reader = tf.python_io.tf_record_iterator(filename)\n n_sentences = 0\n for record in reader:\n x = tf.train.Example()\n x.ParseFromString(record)\n tokens = [int(i) for i in x.features.feature[FLAGS.field].int64_list.value]\n counts[len(tokens)] += 1\n n_sentences += 1\n tf.logging.info(\"Read %d sentences from %s.\", n_sentences, filename)\n total_sentences += n_sentences\n\n tf.logging.info(\"Statistics for %s:\", FLAGS.field)\n sorted_counts = [(l, f) for l, f in counts.iteritems()]\n sorted_counts.sort()\n acc = 0\n for l, f in sorted_counts:\n acc += f\n tf.logging.info(\"<=%d: %d/%d (%.3f%%)\", l, acc, total_sentences, 100.0 * acc / total_sentences)", "def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)", "def read_gauge(self, key: str, *args) -> Iterable[Observation]:\n yield self.map[key]", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 
'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result" ]
[ "0.69621795", "0.6846196", "0.5868116", "0.5818642", "0.58145773", "0.569007", "0.553364", "0.55294347", "0.5510199", "0.5496143", "0.5489615", "0.54809064", "0.5451518", "0.54069734", "0.54033786", "0.5399427", "0.5393153", "0.538728", "0.5384428", "0.5374179", "0.53578675", "0.53468823", "0.5328515", "0.5324128", "0.5302458", "0.5300442", "0.5258494", "0.52579576", "0.5243373", "0.5215087", "0.521458", "0.5203912", "0.518961", "0.51876616", "0.5185983", "0.5184603", "0.51810664", "0.5175785", "0.5175613", "0.51614755", "0.5157842", "0.5149701", "0.51479435", "0.5126285", "0.5115767", "0.51141495", "0.5095638", "0.5092247", "0.5081432", "0.50803655", "0.50789815", "0.50745606", "0.5058389", "0.5052794", "0.505277", "0.5042217", "0.503812", "0.5035032", "0.501124", "0.5009416", "0.50092393", "0.50066733", "0.49974698", "0.49953747", "0.499092", "0.49863535", "0.4986189", "0.49807656", "0.4976692", "0.49623197", "0.49604517", "0.49590003", "0.49573672", "0.4954771", "0.4944074", "0.4931263", "0.49295944", "0.49190173", "0.49187347", "0.49180278", "0.49111733", "0.49081358", "0.49078083", "0.4901386", "0.4896381", "0.48831567", "0.4881957", "0.48806834", "0.48805562", "0.48726717", "0.48712584", "0.48578274", "0.48539582", "0.4851618", "0.48513898", "0.48472157", "0.48426804", "0.48411003", "0.48400754", "0.483956" ]
0.5644732
6
Gathers the metrics from the netstate file.
def gather_sample(self, stat_file, collector=None): # This file format is weird. Each set of stats is outputted in two # lines. First, a header line that list the field names. Then a # a value line where each value is specified in the appropriate column. # You have to match the column name from the header line to determine # what that column's value is. Also, each pair of lines is prefixed # with the same name to make it clear they are tied together. all_lines = stat_file.readlines() # We will create an array of all of the column names in field_names # and all of the corresponding values in field_values. field_names = [] field_values = [] # To simplify the stats, we add together the two forms of retransmit # I could find in the netstats. Those to fast retransmit Reno and those # to selective Ack. retransmits = 0 found_retransmit_metric = False # Read over lines, looking at adjacent lines. If their row names match, # then append their column names and values to field_names # and field_values. This will break if the two rows are not adjacent # but I do not think that happens in practice. If it does, we just # won't report the stats. for i in range(0, len(all_lines) - 1): names_split = all_lines[i].split() values_split = all_lines[i + 1].split() # Check the row names are the same. if names_split[0] == values_split[0] and len(names_split) == len( values_split ): field_names.extend(names_split) field_values.extend(values_split) if not collector: collector = {} # Now go back and look for the actual stats we care about. for i in range(0, len(field_names)): if field_names[i] == "InOctets": collector.update({Metric("app.net.bytes", "in"): field_values[i]}) elif field_names[i] == "OutOctets": collector.update({Metric("app.net.bytes", "out"): field_values[i]}) elif field_names[i] == "TCPRenoRecovery": retransmits += int(field_values[i]) found_retransmit_metric = True elif field_names[i] == "TCPSackRecovery": retransmits += int(field_values[i]) found_retransmit_metric = True # If we found both forms of retransmit, add them up. if found_retransmit_metric: collector.update({Metric("app.net.tcp_retransmits", None): retransmits}) return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_metrics(self):\n raise NotImplementedError()", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. 
on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict", "def parse(self):\n\n\t\t# Open and parse the file\n\t\twith open(self.name, 'r') as fdi:\n\t\t\tfor line in fdi:\n\t\t\t\twords = [word for word in line.split(' ') if word != ' ' and word != ':']\n\t\t\t\t\n\t\t\t\t# Store the data in the hash\n\t\t\t\tself.data_hash[int(words[0], 16)] = int(words[1])\n\n\t\t# Sort the dictionary by addresses\n\t\tself.data_hash = od(sorted(self.data_hash.items(), key = lambda t : t[0]))\n\n\t\tprint 'Total Bytes :', float(sum(self.data_hash.values())) / 1024 / 1024\n\n\t\treturn", "def parse_metrics_file(self) -> Dict[int, dict]:\n LOG.info(\"Parsing Dragen demultiplexing adapter metrics file %s\", self.adapter_metrics_path)\n parsed_metrics = {}\n\n with self.adapter_metrics_path.open(\"r\") as metrics_file:\n metrics_reader = csv.DictReader(metrics_file)\n for row in metrics_reader:\n lane = int(row[\"Lane\"])\n read_number = row[\"ReadNumber\"]\n sample_id = row[\"Sample_ID\"]\n parsed_metrics[lane] = parsed_metrics.get(lane, {})\n parsed_metrics[lane][(read_number, sample_id)] = row\n\n return self.summerize_adapter_metrics(parsed_metrics=parsed_metrics)", "def get_data(self):\n\n self.read_expression()\n self.read_tfs()\n self.read_metadata()\n self.set_gold_standard_and_priors()", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def compute_metrics(self):\n pass", "def main():\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',\n level=logging.INFO)\n\n parser = argparse.ArgumentParser(description='Monitors observation reception.')\n parser.add_argument('--config', type=str, help='configuration file to use '\n '(default: monitor.cfg)')\n\n args = parser.parse_args()\n config_file = args.config if args.config else 'monitor.cfg'\n\n if not exists(config_file):\n logging.error('Could not find configuration file \"%s\"', args.config_file)\n sys.exit(1)\n\n config = configparser.ConfigParser()\n config.read(config_file)\n\n state_file_dir = None\n if 'STATE_FILE_DIRECTORY' in environ:\n state_file_dir = environ['STATE_FILE_DIRECTORY']\n if not exists(state_file_dir) or not 
isdir(state_file_dir):\n logging.error('State file directory \"%s\" does not exist', state_file_dir)\n sys.exit(1)\n\n state_file_name = 'monitor_state.json' if not state_file_dir \\\n else f'{state_file_dir}/monitor_state.json'\n\n try:\n with open(state_file_name, 'r', encoding='utf-8') as state_file:\n state = json.load(state_file)\n except FileNotFoundError:\n state = {'observation': {'email_sent': 'False'},\n 'blebeacon': {'email_sent': 'False'},\n 'ruuvitag': {}}\n for location in config['ruuvitag']['Location'].split(','):\n state['ruuvitag'][location] = {}\n state['ruuvitag'][location]['email_sent'] = 'False'\n\n if config['observation']['Enabled'] == 'True':\n obs = ObservationMonitor(config, state['observation'])\n obs.check_observation()\n state['observation'] = obs.get_state()\n if config['blebeacon']['Enabled'] == 'True':\n beacon = BeaconMonitor(config, state['blebeacon'])\n beacon.check_beacon()\n state['blebeacon'] = beacon.get_state()\n if config['ruuvitag']['Enabled'] == 'True':\n ruuvitag = RuuvitagMonitor(config, state['ruuvitag'])\n ruuvitag.check_ruuvitag()\n state['ruuvitag'] = ruuvitag.get_state()\n\n with open(state_file_name, 'w', encoding='utf-8') as state_file:\n json.dump(state, state_file, indent=4)", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def train(self, counts_file):\n for l in read_counts(counts_file):\n n, count_type, args = int(l[0]), l[1], l[2:]\n if count_type == 'WORDTAG': # emission counts\n self.emission_counts[tuple(args)] = n\n else: # ngram counts\n self.ngram_counts[len(args) - 1][tuple(args)] = n", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if SENDING in line:\n self._req_set.add(self._get_request(line, True))\n line = file.readline()\n except Exception as err:\n print(\"Failed to read garbage collector log. 
Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def readOpsimData(self):\n if self.opsim_data:\n good, string = self.checkOpsimData()\n if good:\n self.opsim_visits = self.opsim_data\n else:\n raise Exception(string)\n elif self.opsim_filename:\n dataDir = os.getenv('LSST_POINTING_DIR')\n if not dataDir:\n raise Exception('LSST_POINTING_DIR env not set')\n opsimfile = os.path.join(dataDir, self.opsim_filename)\n # Read the file, store the info.\n with open(opsimfile, 'r') as opsim:\n for visit in opsim:\n if visit.startswith('o'):\n print(visit)\n continue\n data = visit.strip().split()\n visitDict = {}\n visitDict[_opsim_keys[0]] = int(data[0])\n for i in range(1, len(data)):\n visitDict[_opsim_keys[i]] = float(data[i])\n self.opsim_visits.append(visitDict)\n else:\n raise Exception('No data specified')", "def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n 
generate_statistics(binned_data)\n return 0", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def parseFile(self, file):\n return_dict = {}\n with open(file) as f:\n for line in f:\n line = line.strip()\n\n if line:\n if line.startswith('Left'):\n return_dict['Left'] = self.getStats(f)\n elif line.startswith('Right'):\n return_dict['Right'] = self.getStats(f)\n elif line.startswith('Aligned'):\n return_dict['Aligned'] = self.getStats(f, line)\n elif line.startswith('Reads'):\n return_dict['Reads'] = self.getStats(f)\n else:\n matched_summary = re.search('([\\d|.%]+)', line)\n return_dict['Overall'] = matched_summary.group(1)\n\n #return_dict['Summary'] = re.search('(\\d+\\.\\d+%)', line).group(1)\n\n return return_dict", "def load_state_dict(self, state_dict):\n self.XY_net.load_state_dict(state_dict['XY_net'])\n self.XY_optimizer_minee.load_state_dict(\n state_dict['XY_optimizer_minee'])\n self.X_net.load_state_dict(state_dict['X_net'])\n self.X_optimizer_minee.load_state_dict(state_dict['X_optimizer_minee'])\n self.Y_net.load_state_dict(state_dict['Y_net'])\n self.Y_optimizer_minee.load_state_dict(state_dict['Y_optimizer_minee'])\n self.X = state_dict['X']\n self.Y = state_dict['Y']\n if 'lr' in state_dict:\n self.lr = state_dict['lr']\n if 'batch_size' in state_dict:\n self.batch_size = state_dict['batch_size']\n if 'ref_batch_factor' in state_dict:\n self.ref_batch_factor = state_dict['ref_batch_factor']", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo 
memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return", "def parse_conf(self):\n\n parser = configparser.RawConfigParser()\n parser.read(self.filename)\n\n try:\n self.id_node = parser['CONF_MACHINE']['ID_NODE']\n\n # eliminate possible white spaces between metrics\n temp = parser['CONF_MACHINE']['METRICS'].split(',')\n for itr in temp:\n self.metrics.append(itr.strip())\n\n except Exception:\n raise Exception(\"missing id or metrics\")\n\n try:\n self.interval = parser['CONF_MAHCINE']['INTERVAL']\n except Exception:\n self.interval = 1\n\n try:\n self.ampq_url = parser['ampq']['url']\n self.ampq_port = parser['ampq']['port']\n self.ampq_vhost = parser['ampq']['vhost']\n self.ampq_user = parser['ampq']['user']\n self.ampq_password = parser['ampq']['password']\n except Exception:\n raise Exception(\"missing ampq configs\")", "def calculate_batch_metrics(self):\n pass", "def _update_counters(self, filepath, step):\n\n counters = {}\n\n # Load\n if os.path.exists(self.counters_file):\n with open(self.counters_file) as f:\n counters = json.load(f)\n\n counters[filepath] = dict(step=step)\n\n # Save\n with open(self.counters_file, \"w\") as f:\n json.dump(counters, f, indent=4)", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. 
count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def loadData(self):\n machineToNode = {}\n self.listOfMachines = []\n nextID = 0\n self.processingSteps = []\n with open(self.filename) as f:\n lines = f.read().splitlines()\n for line in lines:\n formatted = line.split(\"\\t\")\n order = int(formatted[0])\n machine = int(formatted[1])\n timestamp = float(formatted[2])\n if machine not in machineToNode: # normalizing machines according to the nodes (1,2,3... instead of 1,34,2...)\n machineToNode[machine] = nextID\n nextID +=1\n self.listOfMachines.append(machineToNode[machine]) # normalized list of all machines\n\n pstep = ProcessingStep(machineToNode[machine], timestamp, order)\n self.processingSteps.append(pstep)", "def load_state(self, dictionary):\n self.log_formatstr = dictionary['log_formatstr']\n self.backend_interval = dictionary['backend_interval']", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def metrics(self):\n\n data = requests.get(\n f\"http://{self.prometheus_host}:{self.prometheus_port}/metrics\"\n ).content.decode()\n lines = [line for line in data.split(\"\\n\") if not line.startswith(\"#\")]\n metrics = {}\n for line in lines:\n if not line:\n continue\n\n name, value = line.split(\" \")\n\n try:\n value = int(value) # type: ignore\n except ValueError:\n value = float(value) # type: ignore\n\n if \"{\" in name and \"}\" in name:\n base = name[: name.index(\"{\")]\n tags = name[name.index(\"{\") + 1 : -1]\n tags = [tag.split(\"=\") for tag in tags.split(\",\")]\n tags = [(key, val.replace('\"', \"\")) for key, val in tags]\n\n name = base + \"#\" + \",\".join(f\"{k}:{v}\" for k, v in sorted(tags))\n\n metrics[name] = value\n\n return metrics", "def readTestFile(self, filename):\n size = 0\n agentNum = 0\n block = {}\n agentList = []\n f = 
open(filename, 'r')\n for line in f:\n if line[0] != '#':\n c = line.split(' ')\n if c[0] == 'grid':\n size = int(line[5:7])\n elif c[0] =='block':\n block[(int(c[2]), int(c[1]))] = (int(c[3]) - int(c[1]) + 1, int(c[4]) - int(c[2]) + 1)\n elif c[0] == 'nets':\n agentNum = int(c[1])\n elif c[0] == 'net' or c[0] == 'xet':\n print(c)\n agentList.append([int(c[1]), (int(c[3]), int(c[2])), (int(c[6]), int(c[5]))])\n f.close()\n print(size)\n print(block)\n print(agentNum)\n print(agentList)\n return size, block, agentNum, agentList", "def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. (Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. 
kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector", "def parse(self):\n\n coverage_data = {\n 'packages': {},\n 'summary': {'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0},\n 'timestamp': str(int(time.time()))\n }\n package = None\n current_file = None\n file_lines_total = 0\n file_lines_covered = 0\n file_lines = {}\n file_methods = {}\n file_branches_total = 0\n file_branches_covered = 0\n\n for line in self.lcov_data.split('\\n'):\n if line.strip() == 'end_of_record':\n if current_file is not None:\n package_dict = coverage_data['packages'][package]\n package_dict['lines-total'] += file_lines_total\n package_dict['lines-covered'] += file_lines_covered\n package_dict['branches-total'] += file_branches_total\n package_dict['branches-covered'] += file_branches_covered\n file_dict = package_dict['classes'][current_file]\n file_dict['lines-total'] = file_lines_total\n file_dict['lines-covered'] = file_lines_covered\n file_dict['lines'] = dict(file_lines)\n file_dict['methods'] = dict(file_methods)\n file_dict['branches-total'] = file_branches_total\n file_dict['branches-covered'] = file_branches_covered\n coverage_data['summary']['lines-total'] += file_lines_total\n coverage_data['summary']['lines-covered'] += file_lines_covered\n coverage_data['summary']['branches-total'] += file_branches_total\n coverage_data['summary']['branches-covered'] += file_branches_covered\n\n line_parts = line.split(':')\n input_type = line_parts[0]\n\n if input_type == 'SF':\n # Get file name\n file_name = line_parts[-1].strip()\n relative_file_name = os.path.relpath(file_name, self.base_dir)\n package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])\n class_name = file_name.split(os.path.sep)[-1]\n if package not in coverage_data['packages']:\n coverage_data['packages'][package] = {\n 'classes': {}, 'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0\n }\n coverage_data['packages'][package]['classes'][\n relative_file_name] = {\n 'name': class_name, 'lines': {}, 'lines-total': 0,\n 'lines-covered': 0, 'branches-total': 0,\n 'branches-covered': 0\n }\n package = package\n current_file = relative_file_name\n file_lines_total = 0\n file_lines_covered = 0\n file_lines.clear()\n file_methods.clear()\n file_branches_total = 0\n file_branches_covered = 0\n elif input_type == 'DA':\n # DA:2,0\n (line_number, line_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in file_lines:\n file_lines[line_number] = {\n 'branch': 'false', 'branches-total': 0,\n 'branches-covered': 0\n }\n file_lines[line_number]['hits'] = line_hits\n # Increment lines 
total/covered for class and package\n if int(line_hits) > 0:\n file_lines_covered += 1\n file_lines_total += 1\n elif input_type == 'BRDA':\n # BRDA:1,1,2,0\n (line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in file_lines:\n file_lines[line_number] = {\n 'branch': 'true', 'branches-total': 0,\n 'branches-covered': 0, 'hits': 0\n }\n file_lines[line_number]['branch'] = 'true'\n file_lines[line_number]['branches-total'] += 1\n file_branches_total += 1\n if branch_hits != '-' and int(branch_hits) > 0:\n file_lines[line_number]['branches-covered'] += 1\n file_branches_covered += 1\n elif input_type == 'BRF':\n file_branches_total = int(line_parts[1])\n elif input_type == 'BRH':\n file_branches_covered = int(line_parts[1])\n elif input_type == 'FN':\n # FN:5,(anonymous_1)\n function_name = line_parts[-1].strip().split(',')[1]\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = '0'\n elif input_type == 'FNDA':\n # FNDA:0,(anonymous_1)\n (function_hits, function_name) = line_parts[-1].strip().split(',')\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = function_hits\n\n # Exclude packages\n excluded = [x for x in coverage_data['packages'] for e in self.excludes\n if re.match(e, x)]\n for package in excluded:\n del coverage_data['packages'][package]\n\n # Compute line coverage rates\n for package_data in list(coverage_data['packages'].values()):\n package_data['line-rate'] = self._percent(\n package_data['lines-total'],\n package_data['lines-covered'])\n package_data['branch-rate'] = self._percent(\n package_data['branches-total'],\n package_data['branches-covered'])\n\n return coverage_data", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n 
self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def read_cpu_stats(target_file):\n test_line = target_file.readline()\n if \"CPU\" in test_line:\n logical_processors = target_file.readline().strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n if \"logical processors\" in test_line:\n logical_processors = test_line.strip()\n processors_desc = target_file.readline().strip()\n threads_cores = target_file.readline().strip()\n return CpuStats(logical_processors, processors_desc, threads_cores)\n return CpuStats('', '', '')", "def _get_flow_stats(self):\n flow_mags = []\n desc = \"Collecting training flow stats\"\n num_flows = len(self._lbl_trn_path)\n with tqdm(total=num_flows, desc=desc, ascii=True, ncols=100) as pbar:\n for flow_path in self._lbl_trn_path:\n pbar.update(1)\n flow = flow_read(flow_path)\n flow_magnitude, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])\n nans = np.isnan(flow_magnitude)\n if np.any(nans):\n nans = np.where(nans)\n flow_magnitude[nans] = 0.\n flow_mags.append(flow_magnitude)\n self.min_flow, self.max_flow = np.min(flow_mags), np.max(flow_mags)\n self.avg_flow = np.mean(flow_mags)\n print(\n f\"train flow min={self.min_flow}, avg={self.avg_flow}, max={self.max_flow} ({num_flows} flows)\")", "def read_file(self, fp):\n try:\n self.steps = []\n f = open(fp, 'r')\n file_arr = f.read().splitlines()\n # Get number of processes.\n self.processes = int(file_arr.pop(0).split(' ')[0])\n # Get number of resources.\n self.resources = int(file_arr.pop(0).split(' ')[0])\n print(\"\\n%d processes and %d resources.\" % (self.processes, self.resources))\n # Load each step.\n for line in file_arr:\n line_arr = line.split(' ')\n # Get process num.\n p = int(line_arr[0].strip('p'))\n # Get request/release.\n if line_arr[1] == 'requests':\n re = 1\n else:\n re = 0\n # Get resource num.\n r = int(line_arr[2].strip('r'))\n # Store as tuple in our steps.\n self.steps.append((p, re, r))\n print(\"%d total steps in simulation.\\n\" % len(self.steps))\n self.state_string[0] = str(self.processes) + \" processes and \" + str(self.resources) + \" resources. 
\"\n self.state_string[1] = str(len(self.steps)) + \" total steps in simulation.\"\n except IOError:\n print(\"Cannot find the file at\", fp)", "def parse(filename):\n data = {}\n for line in reversed(list(open(filename))):\n date, time, ip, source = line.strip().split()\n log_time = datetime.datetime.strptime(date +\" \"+time, '%Y-%m-%d %H:%M:%S')\n diff = datetime.datetime.now() - log_time\n if diff.seconds > 600:\n break\n if ip not in data:\n data[ip] = set()\n data[ip].add(source)\n return data", "def get_nag_status(filename, threshold = 0):\n status_file = filename\n\n f = open(status_file, 'r')\n\n line = f.readline()\n\n host_statuses = {}\n\n this_host = None\n this_service = None\n group_type = None\n\n for line in f:\n if line.strip().endswith('{'):\n group_type = line.strip().split()[0]\n continue\n try:\n this_property, value = get_property(line) #fails on lines without =, the try makes us pass\n #not yet reading programstatus or info\n if group_type == 'hoststatus':\n if this_property == 'host_name':\n this_host = value\n host_statuses[this_host] = {}\n host_statuses[this_host]['HOST'] = {}\n host_statuses[this_host]['HOST']['service_comments'] = {}\n else:\n host_statuses[this_host]['HOST'][this_property] = try_to_convert(value)\n elif group_type == 'servicestatus':\n #host_name always comes before service_description\n if this_property == 'host_name':\n this_host = value\n elif this_property == 'service_description':\n this_service = value\n host_statuses[this_host][this_service] = {}\n host_statuses[this_host][this_service][this_property] = value #handy place to have the service description and host name\n host_statuses[this_host][this_service]['host_name'] = this_host\n host_statuses[this_host][this_service]['service_comments'] = {}\n else:\n host_statuses[this_host][this_service][this_property] = try_to_convert(value)\n if this_property == 'current_state' and host_statuses[this_host][this_service][this_property] < threshold:\n #by simply removing the service here, subsequent attempts to add data fail to the next loop iteration\n del host_statuses[this_host][this_service]\n elif this_property == 'last_state_change':\n host_statuses[this_host][this_service]['current_duration'] = time.time() - try_to_convert(value)\n elif group_type == 'servicecomment':\n if this_property == 'host_name':\n this_host = value\n elif this_property == 'service_description':\n this_service = value\n elif this_property == 'entry_type':\n # Need to hang on to this one for one more line\n this_entry_type = try_to_convert(value)\n elif this_property == 'comment_id':\n this_comment_id = value\n host_statuses[this_host][this_service]['service_comments'][value] = {\n 'entry_type': this_entry_type,\n 'comment_id': this_comment_id\n }\n else:\n host_statuses[this_host][this_service]['service_comments'][this_comment_id][this_property] = try_to_convert(value)\n elif group_type == 'hostcomment':\n if this_property == 'host_name':\n this_host = value\n this_service = 'HOST'\n elif this_property == 'entry_type':\n # Need to hang on to this one for one more line\n this_entry_type = try_to_convert(value)\n elif this_property == 'comment_id':\n this_comment_id = value\n host_statuses[this_host][this_service]['service_comments'][value] = {\n 'entry_type': this_entry_type,\n 'comment_id': this_comment_id\n }\n else:\n host_statuses[this_host][this_service]['service_comments'][this_comment_id][this_property] = try_to_convert(value)\n except:\n pass\n f.close()\n return host_statuses", "def get_state(self):\n try:\n 
json_data = open(self.state_file)\n data = json.load(json_data)\n self.state_timestamp = data[\"timestamp\"]\n json_data.close()\n\n except IOError:\n self.logger.info(\"'%s' not found: an initial state file will be create\" % \\\n self.state_file)\n data = {\"timestamp\": self.state_timestamp}\n with open(self.state_file, 'w') as out_file:\n json.dump(data, out_file, indent=4)\n out_file.close()", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def test_alt_service_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"service_perfdata_file={}\\n\"\n \"service_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_SVC_TEMPLATE),\n service_perf=True\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, {}, config['instances'])\n nagios.get_topology = mocked_topology\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_SVC, \"r\") as f:\n nagios_perf = ensure_string(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.current_users.users',\n 'timestamp': 1339511440,\n 'value': 1.0,\n 'hostname': 'localhost',\n 'tags': ['warn:20', 'crit:50', 'min:0'],\n },\n {\n 'name': 'nagios.ping.pl',\n 'timestamp': 1339511500,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:20', 'crit:60', 'min:0'],\n },\n {\n 'name': 'nagios.ping.rta',\n 'timestamp': 1339511500,\n 'value': 0.065,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:100.000000', 'crit:500.000000', 'min:0.000000'],\n },\n {\n 'name': 'nagios.root_partition',\n 'timestamp': 1339511560,\n 'value': 2470.0,\n 'hostname': 'localhost',\n 'tags': ['unit:MB', 'warn:5852', 'crit:6583', 'min:0', 'max:7315', 'device:/'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()", "def store_metrics_to_params(self):\n\n model = self.model_name\n\n if self.stats_path.exists():\n with open(self.stats_path, \"rb\") as f:\n stats_dict = pickle.load(f)\n else:\n stats_dict = {}\n\n if model not in stats_dict:\n stats_dict[model] = defaultdict(list)\n\n stats_dict[model]['amine'].append(self.amine)\n stats_dict[model]['accuracies'].append(self.metrics['accuracies'])\n stats_dict[model]['confusion_matrices'].append(\n self.metrics['confusion_matrices'])\n stats_dict[model]['precisions'].append(self.metrics['precisions'])\n stats_dict[model]['recalls'].append(self.metrics['recalls'])\n stats_dict[model]['bcrs'].append(self.metrics['bcrs'])\n\n # Save this dictionary in case we need it later\n with open(self.stats_path, \"wb\") as f:\n pickle.dump(stats_dict, f)", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines 
= [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result", "def process_config_file_stashmean(config):\n for key, data in config.walk():\n section = key[0]\n if len(key) == 1:\n retxt = re.search(r\"namelist:(?P<name>\\w+)\\((?P<num>[0-9_a-zA-Z]+)\", key[0])\n # pick out parts of config that are stash-related\n print 'section ', section\n for section_base in STASH_SECTION_BASES:\n if (section.startswith(section_base) and\n 'domain_nml' not in section):\n nl = retxt.group('name')\n print 'nl ', nl\n profile = assign_profile(nl, data)\n for profile_key, profile_value in profile.iterate():\n if profile_key == 'name':\n stashname = profile_value\n\n if nl == 'streq':\n # comment out duplicate requests\n package_duplicates(config, key)\n # if dump mean profile, change to STASH meaning\n if 'TDMPMN' in config.value[key[0]].value['tim_name'].value:\n if config.value[key[0]].value['use_name'].value == \"'UPMEAN'\":\n config.value[key[0]].value['tim_name'].value = \"'TMONMN'\"\n if 'UKCA' in config.value[key[0]].value['package'].value or \\\n 'EASYA' in config.value[key[0]].value['package'].value:\n config.value[key[0]].value['use_name'].value = \"'UP3'\"\n else:\n if (config.value[key[0]].value['isec'].value == '30'\n or 'Dust' in stashname):\n config.value[key[0]].value['use_name'].value = \"'UP2'\"\n else:\n config.value[key[0]].value['use_name'].value = \"'UP1'\"\n\n # now deal with COSP diagnostics - use hourly data on\n # radiation timesteps\n if 'T6HDMPM' in config.value[key[0]].value['tim_name'].value:\n if config.value[key[0]].value['use_name'].value == \"'UPMEAN'\":\n config.value[key[0]].value['tim_name'].value = \"'T6HMONM'\"\n if ((config.value[key[0]].value['isec'].value == '30')\n or ('Dust' in stashname)):\n config.value[key[0]].value['use_name'].value = \"'UP2'\"\n else:\n config.value[key[0]].value['use_name'].value = \"'UP1'\"\n\n # now deal with diurnal cycle diagnostics\n if 'TMPMN' in config.value[key[0]].value['tim_name'].value:\n config.value[key[0]].value['package'].value = \"'DIURNAL'\"\n period = config.value[key[0]].value['tim_name'].value[-3:-1]\n config.value[key[0]].value['tim_name'].value = \"'TMONMN\" + period + \"'\"\n config.value[key[0]].value['use_name'].value = \"'UPK'\"\n\n # now deal with COSP diagnostics - use hourly data on radiation timesteps\n 
if 'TRADDM' in config.value[key[0]].value['tim_name'].value:\n if config.value[key[0]].value['use_name'].value == \"'UPMEAN'\":\n config.value[key[0]].value['tim_name'].value = \"'TRADMONM'\"\n config.value[key[0]].value['use_name'].value = \"'UP1'\"\n\n # change 90 day instantaneous to 30 day\n if 'T90DAY' in config.value[key[0]].value['tim_name'].value:\n config.value[key[0]].value['tim_name'].value = \"'T30DAY'\"\n config.value[key[0]].value['use_name'].value = \"'UPU'\"\n\n # change 90 day instantaneous to 30 day\n if 'TSTEPGI' in config.value[key[0]].value['tim_name'].value:\n config.value[key[0]].value['use_name'].value = \"'UPT'\"\n config.value[key[0]].value['package'].value = \"'TSTEP_STD_GA7'\"\n\n # set all blank package switches to a standard value\n if config.value[key[0]].value['package'].value == \"''\":\n config.value[key[0]].value['package'].value = \"'STD_GA7'\"\n # TODO: Consider changing the frequency of reinitialisation of the files in automated way", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def read(self):\n self.record_d = {}\n if self.__read_file():\n self.__print_report()", "def stats(self):\n pass", "def total_hpwl(file_name):\r\n\r\n nodes = {}\r\n netsx = {}\r\n netsy = {}\r\n counter = 0\r\n hpwl = 0\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] not in nodes:\r\n nodes[line.split()[0]] = []\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n nodes[line.split()[0]].append(line.split()[1])\r\n nodes[line.split()[0]].append(line.split()[2])\r\n\r\n with open(file_name + \".nets\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if \"NetDegree\" in line:\r\n num_of_nodes = int(line.split()[2])\r\n net_name = \"n\" + str(counter)\r\n counter += 1\r\n netsx[net_name] = []\r\n netsy[net_name] = []\r\n elif re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if net_name in netsx:\r\n if len(netsx[net_name]) == 0:\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]))\r\n netsx[net_name].append(int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]))\r\n\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]))\r\n netsy[net_name].append(int(nodes[line.split()[0]][3]) + 
int(nodes[line.split()[0]][1]))\r\n else:\r\n if int(nodes[line.split()[0]][2]) < netsx[net_name][0]:\r\n netsx[net_name][0] = int(nodes[line.split()[0]][2])\r\n\r\n if int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0]) > netsx[net_name][1]:\r\n netsx[net_name][1] = int(nodes[line.split()[0]][2]) + int(nodes[line.split()[0]][0])\r\n\r\n if int(nodes[line.split()[0]][3]) < netsy[net_name][0]:\r\n netsy[net_name][0] = int(nodes[line.split()[0]][3])\r\n\r\n if int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1]) > netsy[net_name][1]:\r\n netsy[net_name][1] = int(nodes[line.split()[0]][3]) + int(nodes[line.split()[0]][1])\r\n\r\n for net in netsx:\r\n hpwl += float(netsx[net][1] - netsx[net][0] + netsy[net][1] - netsy[net][0])\r\n\r\n return (hpwl)", "def _load_tracker(self):\n\n if os.path.isfile(config.TRACKER_JSON):\n with self.__writelock, open(config.TRACKER_JSON, encoding='utf-8-sig') as f:\n d = json.loads(f.read())\n try:\n self.stats.previous_requests = d[self.maps.key]\n except KeyError:\n self.stats.previous_requests = 0\n else:\n self.stats.previous_requests = 0", "def stats(self):", "def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)", "def load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1'] = con[0]\n loadavg['lavg_5'] = con[1]\n loadavg['lavg_15'] = con[2]\n loadavg['nr'] = con[3]\n loadavg['last_pid'] = con[4]\n return loadavg", "def load(self, path):\n\n # Restore\n self.checkpoint.restore(path).expect_partial()\n print(\"> Loaded:\", path)\n\n # Read counters\n with open(self.counters_file) as f:\n data = json.load(f)\n return data[path]", "def get_load_data():\n proc_stat = open(\"/proc/stat\", \"r\")\n ret = []\n #times_since_startup = proc_stat.readline().strip().split()[1:]\n for line in proc_stat:\n line_split = line.strip().split()\n if(not (\"cpu\" in line_split[0])): #we have gone past the CPU lines\n break\n else:\n #everything but the label since we know [0] is overall and after that is per core by index\n ret.append(line_split[1:]) \n 
proc_stat.close()\n return ret", "def walk(self):\n data = open(self.data_file_path, 'rb')\n read_metric = globals()[\"ProtoDefinition\"].Payload()\n read_metric.ParseFromString(data.read())\n\n # One record for the whole file\n self.payload_metadata = read_metric.payloadMetadata\n self.device = read_metric.device\n\n # Get list of all *repeated* field types\n field_names = []\n for field_desc in read_metric.DESCRIPTOR.fields:\n field_name = field_desc.name\n\n if field_desc.label == field_desc.LABEL_REPEATED:\n field_names.append(field_name)\n\n # For each repeated field type, get the data and yield one item at a time\n for field_name in field_names:\n stream_samples = getattr(read_metric, field_name)\n for sample in stream_samples:\n yield self.device, sample", "def set_metrics(self):", "def ParseStateFile(self):\n s3Log.info (\"In crash run - updating File Paths from stateFile! \")\n try:\n with open('stateFile.json') as statefile:\n self.fileTobeUploaded=json.load(statefile)\n except json.decoder.JSONDecodeError as e:\n s3Log.error(\"FATAL ERROR: Unable to parse stateFile; wont be able to list down backup file paths. \")\n sys.exit(1)", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def load(self, file_name_with_path: str):\n\n if self.state._models is None:\n self.register_models()\n logger.info(\"Agent State loaded successfully\")\n for k, model in self.state._models.items():\n model.load(file_name_with_path=os.path.join(f'{file_name_with_path}_{model.name}.th'))\n logger.info(f'{file_name_with_path}_{model.name}.th loaded')\n logger.info(f\"{model.name} model loaded successfully\")\n self.state = Munch(json.load(open(file_name_with_path + \".meta\")))", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = 
system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def test_state(self):\n # blocks [0 3583] [3840 4058]\n test_file1 = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-2-0.mdd')\n # blocks [0 1279] [1536 1791] [2048 2303] [2560 2815] [3072 4059]\n test_file2 = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-3-0.mdd')\n\n # parse the two .mdd files into the node and instrument group files\n mdd.procall([test_file1, test_file2])\n\n file_state = self.get_file_state('node58p1.dat')\n # there is an unprocessed '/n' in between records\n expected_file_state = {StateKey.UNPROCESSED_DATA: [[4059, 4060]],\n StateKey.FILE_SIZE: 4060,\n StateKey.OUTPUT_INDEX: 1}\n\n if file_state != expected_file_state:\n print file_state\n self.fail(\"Expected file state 1 does not match\")\n\n # blocks [0 2047] [2304 4095] [4096 7451]\n test_file3 = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-6-0.mdd')\n\n # parse another .mdd file adding on to the node file, and making\n # another sequence of instrument group files\n mdd.procall([test_file3])\n\n file_state = self.get_file_state('node58p1.dat')\n expected_file_state = {StateKey.UNPROCESSED_DATA: [[4059, 4060]],\n StateKey.FILE_SIZE: 7452,\n StateKey.OUTPUT_INDEX: 2}\n\n if file_state != expected_file_state:\n print \"file state: '%s'\" % file_state\n self.fail(\"Expected file state 2 does not match\")\n\n data_orig = self.read_full_file('node58p1.dat')\n\n # read the data from all generated files into one data string\n data_out = 
self.read_full_file('node58p1_0.status_1236801.dat')\n data_out += self.read_full_file('node58p1_0.wa_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_0.wc_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_0.we_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_1.status_1236801.dat')\n data_out += self.read_full_file('node58p1_1.wa_wfp_1236822.dat')\n data_out += self.read_full_file('node58p1_1.wc_wfp_1236822.dat')\n data_out += self.read_full_file('node58p1_1.we_wfp_1236822.dat')\n\n # confirm data in the node file matches those output in the instrument groups\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")", "def read(self, filePath):\n \n result = {\n 'coordinates': {\n 'count': 0,\n 'nodes': []\n },\n 'element_groups': { \n 'number_of_elements': 0,\n 'count': 0,\n 'groups': []\n },\n 'bars': [],\n 'materials': {\n 'count': 0,\n 'materials': []\n },\n 'geometric_properties': {\n 'count': 0\n },\n 'bcnodes': {\n 'count': 0\n },\n 'loads': {\n 'count': 0\n }\n }\n # print(result['coordinates']['nodes'])\n \n with open(filePath,'r') as f:\n lines = f.readlines()\n elementCounter = 0\n groupCounter = 0\n geometricCounter = 0\n\n for line in lines:\n line = line.strip()\n el = line.split(' ')\n \n if len(line) == 0:\n continue\n\n if len(line) != 0 and line[0] == \"*\":\n section = line[1:].lower()\n continue\n \n if section == 'coordinates':\n if len(el) == 1 :\n result[section]['count'] = el[0]\n else:\n result[section]['nodes'].append(Node(int(el[0]), float(el[1]), float(el[2])))\n \n elif section == 'element_groups':\n if len(line) == 1:\n result[section]['count'] = int(el[0])\n else: \n result[section]['groups'].append(Group(el[0], el[1], el[2]))\n result[section]['number_of_elements'] += int(el[1])\n\n elif section == 'incidences':\n groups = result['element_groups']['groups']\n nodes = result['coordinates']['nodes']\n print(el)\n\n currentGroup = groups[groupCounter]\n if (currentGroup.amount == 0):\n groupCounter += 1\n currentGroup = groups[groupCounter]\n \n print(\"Group n: {} count: {}\".format(currentGroup.n, currentGroup.amount))\n \n bar = Bar(el[0], nodes[int(el[1])-1], nodes[int(el[2])-1], groups[groupCounter])\n print(\n \"\"\"\n Bar {} created \n Start node: {} End Node: {} Group: {}\n \"\"\".format(bar.id, bar.startNode.n, bar.endNode.n, bar.group))\n result['bars'].append(bar)\n currentGroup.amount -= 1\n \n elif section == 'materials':\n if len(el) == 1:\n result[section]['count'] = el[0]\n groupCounter = 0\n else:\n material = Material(el[0], el[1], el[2])\n result[section]['materials'].append(material)\n result['element_groups']['groups'][groupCounter].setMaterial(material)\n groupCounter += 1\n\n elif section == 'geometric_properties':\n if geometricCounter == 0:\n result[section]['count'] = el[0]\n else:\n result['element_groups']['groups'][geometricCounter - 1].setSectionArea(\n el[0]\n )\n geometricCounter += 1\n\n elif section == 'bcnodes':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n result['coordinates']['nodes'][nodeIndex].setRestriction(int(el[1]))\n\n elif section == 'loads':\n if len(el) == 1:\n result[section]['count'] = el[0]\n else:\n load = Load(el[1], el[2])\n nodeIndex = next((e for e, item in enumerate(\n result['coordinates']['nodes']) if item.n == int(el[0])), None\n )\n 
result['coordinates']['nodes'][nodeIndex].addLoad(load)\n\n for bar in result['bars']:\n bar.createLocalArray()\n\n print('---------- Parsing complete! ----------')\n pprint(result)\n print('---------------------------------------')\n\n return result", "def get_run_metrics_handle(run_dir):\n #print(\"Examining: {}\".format(run_dir))\n\n valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)\n for v2l in (py_interop_run.Tile, py_interop_run.ExtendedTile):\n valid_to_load[v2l] = 1\n\n run_metrics = py_interop_run_metrics.run_metrics()\n run_metrics.read(run_dir, valid_to_load)\n\n return run_metrics", "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "def get_total_and_retrans_frames(pcap_filepath, connections):\n # First init values to avoid strange errors if connection is empty\n for conn_id, conn in connections.iteritems():\n for direction in co.DIRECTIONS:\n connections[conn_id].flow.attr[direction][co.FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.FRAMES_RETRANS] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_RETRANS] = 0\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_total\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats(None, pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n # Manage case with ipv6\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_TOTAL] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_TOTAL] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_TOTAL] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_TOTAL] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_retrans\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats('tcp.analysis.retransmission', pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_RETRANS] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_RETRANS] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_RETRANS] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_RETRANS] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)", "def worker_logfile_processing(worker_logfile_path, log_record_mgr):\n total_graph_str = \"\"\n 
graph_end_line = 0\n graph = [\"\",\"\"]\n graph_count = 0\n line_count = 0\n with open(worker_logfile_path, 'r') as f:\n line_count += 1\n line = f.readline()\n while line:\n if \"Register node\" in line:\n graph[graph_count%2] = \"node{\\n\"\n line_count += 1\n line = f.readline()\n while line and \"library {\" not in line:\n graph[graph_count%2] += line\n line_count += 1\n line = f.readline()\n graph[graph_count%2] += \"library {\\n}\\nversions {\\n producer:22\\n}\\n\"\n graph_end_line = line_count\n graph_count += 1\n line_count += 1\n line = f.readline()\n total_graph_str = graph[0] + graph[1]\n graph_def = graph_pb2.GraphDef()\n text_format.Merge(total_graph_str, graph_def)\n logfile_processing(graph_end_line, worker_logfile_path, log_record_mgr)\n return graph_def", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError", "def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = np.zeros(0)\n self.metrics['test_loss'] = np.zeros(0)\n\n # self.orth_clf = LinearDecoder(self, self.q_, MeanClassifier)\n # self.metrics['train_orthogonality'] = np.zeros(0)\n # self.metrics['test_orthogonality'] = np.zeros(0)\n\n self.metrics['train_parallelism'] = np.zeros((0,self.q_)) \n self.metrics['test_parallelism'] = np.zeros((0,self.q_))", "def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))", "def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def aggregate_states(checkpoint_dir, filename_prefix='state_dict', state_dict_key_name='state_dict'):\n\n aggregated_states = {}\n num_states = len(glob.glob1(checkpoint_dir,\"{}*\".format(filename_prefix)))\n for rank in range(num_states):\n rank_state_dict = None\n with open(os.path.join(checkpoint_dir, '{}_{}.pkl'.format(filename_prefix, rank)), 'rb') as f:\n rank_state_dict = pickle.load(f)\n # if state_dict_key_name is None, then the rank_state_dictis the loaded object\n if state_dict_key_name:\n # if it has a name, index into the loaded object to extract the rank_state_dict\n rank_state_dict = rank_state_dict['{}_{}'.format(state_dict_key_name, rank)]\n\n checkpoint._aggregate_model_states(rank_state_dict, {}, aggregated_states, rank_state_dict['trainer_options']['mixed_precision'])\n\n checkpoint._aggregate_optimizer_states(rank_state_dict, {}, aggregated_states)\n \n return aggregated_states", "def 
metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def flush_parse_initialize(self):\n self.current_cluster = 0\n self.current_frequency_of_clusters = []\n self.timestamp = []\n self.currentstates_of_clusters = []\n self.state_time_map = {}\n self.cpuid_time_map = {}\n self.cpu_freq_time_spent = {}\n self.cpuids_of_clusters = []\n self.parse() # Parse trace.txt generated from trace-cmd instrumentation\n # Initialize the states of each core of clusters and frequency of\n # each clusters with its minimum freq\n # cpu_id is assigned for each of clusters.\n # For IKS devices cpuid remains same in other clusters\n # and for other it will increment by 1\n count = 0\n for cluster, cores_number in enumerate(self.numberofcores_in_cluster):\n self.currentstates_of_clusters.append([-1 for dummy in range(cores_number)])\n self.current_frequency_of_clusters.append(self.minimum_frequency_cluster[cluster])\n if self.device.scheduler == 'iks':\n self.cpuids_of_clusters.append([j for j in range(cores_number)])\n else:\n self.cpuids_of_clusters.append(range(count, count + cores_number))\n count += cores_number\n\n # Initialize the time spent in each state/frequency for each core.\n for i in range(self.device.number_of_cores * self.multiply_factor):\n self.cpu_freq_time_spent[\"cpu{}\".format(i)] = {}\n for j in self.unique_freq():\n self.cpu_freq_time_spent[\"cpu{}\".format(i)][j] = 0\n # To determine offline -1 state is added\n offline_value = -1\n self.cpu_freq_time_spent[\"cpu{}\".format(i)][offline_value] = 0\n if 0 not in self.unique_freq():\n self.cpu_freq_time_spent[\"cpu{}\".format(i)][0] = 0", "def test_alt_service_perfdata(self, aggregator):\n self.log_file = tempfile.NamedTemporaryFile()\n perfdata_file = tempfile.NamedTemporaryFile()\n\n # Get the config\n config, _ = get_config(\n \"service_perfdata_file={}\\n\"\n \"service_perfdata_file_template={}\".format(perfdata_file.name, NAGIOS_TEST_ALT_SVC_TEMPLATE),\n service_perf=True,\n )\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n # Run the check once\n nagios.check(config['instances'][0])\n\n with open(NAGIOS_TEST_SVC, \"r\") as f:\n nagios_perf = ensure_bytes(f.read())\n\n perfdata_file.write(nagios_perf)\n perfdata_file.flush()\n\n nagios.check(config['instances'][0])\n\n # Test metrics\n expected_metrics = [\n {\n 'name': 'nagios.current_users.users',\n 'timestamp': 1339511440,\n 'value': 1.0,\n 'hostname': 'localhost',\n 'tags': ['warn:20', 'crit:50', 'min:0'],\n },\n {\n 'name': 'nagios.ping.pl',\n 'timestamp': 1339511500,\n 'value': 0.0,\n 'hostname': 'localhost',\n 'tags': ['unit:%', 'warn:20', 'crit:60', 'min:0'],\n },\n {\n 'name': 'nagios.ping.rta',\n 'timestamp': 1339511500,\n 'value': 0.065,\n 'hostname': 'localhost',\n 'tags': ['unit:ms', 'warn:100.000000', 'crit:500.000000', 'min:0.000000'],\n },\n {\n 'name': 'nagios.root_partition',\n 'timestamp': 1339511560,\n 'value': 2470.0,\n 'hostname': 'localhost',\n 'tags': ['unit:MB', 'warn:5852', 'crit:6583', 'min:0', 'max:7315', 'device:/'],\n },\n ]\n\n for metric in expected_metrics:\n aggregator.assert_metric(metric['name'], metric['value'], tags=metric['tags'], hostname=metric['hostname'])\n\n aggregator.assert_all_metrics_covered()", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later 
recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def ParseNodeStatus(self):\n mc = subprocess.Popen([MOCACTL, 'show', '--nodestatus', str(self.NodeID)],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n bitloading = [[], []]\n bitloadidx = 0\n for line in out.splitlines():\n mac = MAC_RE.search(line)\n if mac is not None:\n type(self).MACAddress.Set(self, mac.group(1))\n pnc = PNC_RE.search(line)\n if pnc is not None:\n preferred = False if pnc.group(1) is '0' else True\n type(self).PreferredNC.Set(self, preferred)\n ptx = PTX_RE.search(line)\n if ptx is not None:\n type(self).PHYTxRate.Set(self, (IntOrZero(ptx.group(2)) / 1000000))\n txpowercontrol = int(FloatOrZero(ptx.group(1)))\n type(self).TxPowerControlReduction.Set(self, txpowercontrol)\n prx = PRX_RE.search(line)\n if prx is not None:\n type(self).PHYRxRate.Set(self, (IntOrZero(prx.group(2)) / 1000000))\n rxpower = FloatOrZero(prx.group(1))\n type(self).RxPowerLevel.Set(self, abs(int(rxpower)))\n type(self).X_CATAWAMPUS_ORG_RxPowerLevel_dBm.Set(self, rxpower)\n rxsnr = FloatOrZero(prx.group(3))\n type(self).RxSNR.Set(self, abs(int(rxsnr)))\n type(self).X_CATAWAMPUS_ORG_RxSNR_dB.Set(self, rxsnr)\n rxb = RXB_RE.search(line)\n if rxb is not None:\n type(self).TxBcastRate.Set(self, (IntOrZero(rxb.group(2)) / 1000000))\n rxbpower = FloatOrZero(rxb.group(1))\n type(self).RxBcastPowerLevel.Set(self, abs(int(rxbpower)))\n type(self).X_CATAWAMPUS_ORG_RxBcastPowerLevel_dBm.Set(self, rxbpower)\n qam = QAM_RE.search(line)\n if qam is not None:\n qam256 = False if qam.group(1) is '0' else True\n type(self).QAM256Capable.Set(self, qam256)\n agg = AGG_RE.search(line)\n if agg is not None:\n aggcapable = IntOrZero(agg.group(1))\n type(self).PacketAggregationCapability.Set(self, aggcapable)\n if 'Unicast Bit Loading Info' in line:\n bitloadidx = 0\n if 'Broadcast Bit Loading Info' in line:\n bitloadidx = 1\n btl = BTL_RE.search(line)\n if btl is not None:\n bitloading[bitloadidx].append(line)\n (txbitl, rxbitl) = _CombineBitloading(bitloading[0])\n type(self).X_CATAWAMPUS_ORG_RxBitloading.Set(self, '$BRCM1$' + rxbitl)\n type(self).X_CATAWAMPUS_ORG_TxBitloading.Set(self, '$BRCM1$' + txbitl)", "def doCollectTask(filename, topK):\n f = open(filename)\n dataDict = json.load(f)\n weirdOutCollect = Counter()\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n # Check direction first to get the inner server.\n srcIP = dataDict[key][\"addr\"][0]\n dstIP = dataDict[key][\"addr\"][2]\n if srcIP.startswith(\"136.159.\"):\n # Which means srcIP is within our campus. 
it should be an outbound traffic\n weirdOutCollect[getIPCluster(dstIP)] += 1\n else:\n weirdOutCollect[getIPCluster(srcIP)] += 1\n\n return Counter(dict(weirdOutCollect.most_common(topK)))", "def get_config_metrics():\n\n metrics = {'disk_usage': 'YES',\n 'cpu_percent': 'YES',\n 'memory_info': 'YES',\n 'cpu_stats': 'YES'}\n\n return metrics", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def ParseNodeStats(self):\n mc = subprocess.Popen([MOCACTL, 'show', '--nodestats', str(self.NodeID)],\n stdout=subprocess.PIPE)\n out, _ = mc.communicate(None)\n rx_err = 0\n for line in out.splitlines():\n tx = TX_RE.search(line)\n if tx is not None:\n type(self).TxPackets.Set(self, IntOrZero(tx.group(1)))\n rx = RX_RE.search(line)\n if rx is not None:\n type(self).RxPackets.Set(self, IntOrZero(rx.group(1)))\n e1 = E1_RE.search(line)\n if e1 is not None:\n rx_err += IntOrZero(e1.group(1))\n e2 = E2_RE.search(line)\n if e2 is not None:\n rx_err += IntOrZero(e2.group(1))\n type(self).RxErroredAndMissedPackets.Set(self, rx_err)", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def read_used():\n used_hashes = {\"evs\": set([]),\n \"cache\": set([]),\n \"seeds\": set([])}\n\n with open(LOG_FILEPATH, 'rb') as logfile:\n for line in logfile.readlines():\n kind, hash = tuple(line.split('...'))\n used_hashes[kind].add(hash.rstrip())\n\n return used_hashes", "def loadFeeds(self):\n\n metrics = self.config['metrics']\n for metric in metrics:\n metricConf = self.config['metrics'][metric]\n metricConf['name'] = metric\n source = metricConf['source']['driver']\n if 'metrics' not in self.sources[source['name']]:\n self.sources[source['name']]['metrics'] = []\n\n self.sources[source['name']]['metrics'].append(metricConf)", "def doCollectTask(filename, topK):\n f = open(filename)\n dataDict = json.load(f)\n weirdInCollect = Counter()\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n # Check direction first to get the inner server.\n srcIP = dataDict[key][\"addr\"][0]\n dstIP = dataDict[key][\"addr\"][2]\n for _ in dataDict[key][\"weird\"]:\n if srcIP.startswith(\"136.159.\"):\n # Which means srcIP is within our campus. it should be an outbound traffic\n weirdInCollect[getIPCluster(srcIP)] += 1\n else:\n weirdInCollect[getIPCluster(dstIP)] += 1\n\n return Counter(dict(weirdInCollect.most_common(topK)))" ]
[ "0.59211487", "0.5842457", "0.5809869", "0.5712683", "0.5633532", "0.56009007", "0.55989665", "0.5477207", "0.54482716", "0.5303792", "0.5302852", "0.5279958", "0.52649426", "0.5263945", "0.52634865", "0.5221303", "0.5212169", "0.52069163", "0.5195175", "0.5184402", "0.51823294", "0.51739126", "0.5126451", "0.51253086", "0.5121291", "0.51188177", "0.50977314", "0.50863683", "0.50824237", "0.5081592", "0.50720924", "0.5064585", "0.50642854", "0.5058912", "0.50552607", "0.50539595", "0.5049715", "0.5028696", "0.50257474", "0.500906", "0.49958315", "0.49866474", "0.498527", "0.49825513", "0.49779966", "0.49732795", "0.4966522", "0.49618492", "0.49593928", "0.495567", "0.49537438", "0.49380732", "0.4921737", "0.49166545", "0.49132577", "0.49123114", "0.49112982", "0.4900577", "0.4898634", "0.48979637", "0.48937058", "0.48914602", "0.48849958", "0.48848352", "0.4881596", "0.48759052", "0.4874206", "0.48705256", "0.48704988", "0.48658577", "0.48630965", "0.4854195", "0.4849564", "0.48471075", "0.48453772", "0.48437807", "0.48421833", "0.484182", "0.48402163", "0.48402083", "0.4840084", "0.48377252", "0.48371297", "0.4836346", "0.48323062", "0.48290738", "0.48285612", "0.4826151", "0.4825672", "0.48214328", "0.48204938", "0.48152545", "0.48098522", "0.4806205", "0.4806167", "0.48060313", "0.48055482", "0.48041302", "0.48035768", "0.4795505" ]
0.56973577
4
Gathers the metrics from the sockstat file.
def gather_sample(self, stat_file, collector=None): if not collector: collector = {} for line in stat_file: # We just look for the different "inuse" lines and output their # socket type along with the count. m = re.search(r"(\w+): inuse (\d+)", line) if m is not None: collector.update( { Metric("app.net.sockets_in_use", m.group(1).lower()): int( m.group(2) ) } ) return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. 
on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector", "def get_stats(self):\n\n\t\tserver_data = {}\n\n\t\tyield self.sendall(\"stats\\r\\n\")\n\n\t\twhile True:\n\t\t\tline = yield self.read_line()\n\n\t\t\tif not line or line.strip() == \"END\":\n\t\t\t\tbreak\n\n\t\t\t_stat, name, value = line.split(' ', 2)\n\t\t\tserver_data[name] = value\n\n\t\traise StopIteration(server_data)", "def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats", "def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with 
\"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector", "def _get_openvpn_stats(path=\"/var/run/openvpn/server-0.sock\"):\n try:\n logging.debug(\"Getting metrics from %s\", path)\n with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:\n sock.connect(path)\n sock.send(b\"load-stats\\n\")\n sock.setblocking(0)\n\n ready = select.select([sock], [], [], 5.0)\n if ready[0]:\n data = sock.recv(4096)\n if not data:\n logging.debug(\"No result?\")\n return 0\n data = data.decode('utf-8')\n logging.debug(\"Received %s\", data)\n data_match = re.search(r'nclients=(\\d+)', data)\n logging.debug(\"pattern match result %s\", data_match)\n if data_match:\n logging.debug(\"%s connections\", data_match.group(1))\n return int(data_match.group(1))\n except Exception as exc:\n logging.debug(\"Error gathering openvpn stats: %s\", exc)\n\n return 0", "def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')", "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "def _request_stats(self, datapath):\n self.logger.debug('send stats request: %016x', datapath.id)\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPPortDescStatsRequest(datapath, 0)\n datapath.send_msg(req)\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)", "def sstat(self):\n coh = self.cohorts[0]\n nsample = count_lines(wtccc2_sample_file(coh, opts.platform)) - 2 \n nfac = count_lines(opts.factor_file)\n if nsample != nfac:\n raise Exception('Number of individuals in sample file (%d) does not match number if factor file (%d)' % (\n (nsample, nfac)))\n for chrom in opts.chroms:\n system('gunzip -c %s | sstat -n %d -p -f %s > %s-%02d.sstat' % (\n gen_gz_file(coh, chrom, opts.platform), nsample, opts.factor_file, coh, chrom),\n verbose=True)", "def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n 
continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return", "def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())", "def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)", "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections", "def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict", "def get_host_stats(self, refresh=False):", "def get_cache_stats():\n hostnames = get_memcached_hosts()\n\n if not hostnames:\n return None\n\n all_stats = []\n\n for hostname in hostnames:\n try:\n host, port = hostname.split(':')\n except ValueError:\n # Assume this is a hostname without a port.\n socket_af = socket.AF_INET\n host = hostname\n port = 11211\n\n if host == 'unix':\n socket_af = socket.AF_UNIX\n connect_param = port\n else:\n socket_af = socket.AF_INET\n connect_param = (host, int(port))\n\n s = socket.socket(socket_af, socket.SOCK_STREAM)\n\n try:\n s.connect(connect_param)\n except socket.error:\n logger.error('Unable to connect to \"%s\"' % hostname)\n s.close()\n continue\n\n s.send(b'stats\\r\\n')\n data = s.recv(2048).decode('ascii')\n s.close()\n\n stats = {}\n\n 
for line in data.splitlines():\n info = line.split(' ')\n\n if info[0] == 'STAT' and len(info) == 3:\n try:\n value = int(info[2])\n except ValueError:\n value = info[2]\n\n stats[info[1]] = value\n\n if stats['cmd_get'] == 0:\n stats['hit_rate'] = 0\n stats['miss_rate'] = 0\n else:\n stats['hit_rate'] = 100 * stats['get_hits'] / stats['cmd_get']\n stats['miss_rate'] = 100 * stats['get_misses'] / stats['cmd_get']\n\n all_stats.append((hostname, stats))\n\n return all_stats", "def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))", "def get_file_stat(host, fqpath):\n statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'\n command = \"stat -c '%s' %s\" % (statformat, fqpath)\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n stat_data = {}\n stat_string = rout.strip()\n (filetype, filename, inode,\n access, size, links,\n uid, gid, username, groupname) = stat_string.split(\":\")\n\n stat_data['filetype'] = filetype\n stat_data['filename'] = filename\n stat_data[\"inode\"] = inode\n stat_data[\"access\"] = access\n stat_data[\"size\"] = size\n stat_data[\"links\"] = links\n stat_data[\"username\"] = username\n stat_data[\"groupname\"] = groupname\n stat_data[\"uid\"] = uid\n stat_data[\"gid\"] = gid\n\n return stat_data\n\n g.log.error(\"Could not stat file %s: %s\" % (fqpath, rerr))\n return None", "def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics", "def extract_tstat_data_tcp_complete(filename, connections, conn_id):\n log_file = open(filename)\n data = log_file.readlines()\n for line in data:\n # Case 1: line start with #; skip it\n if not line.startswith(\"#\"):\n # Case 2: extract info from the line\n info = line.split()\n conn_id += 1\n connection = TCPConnection(conn_id)\n connection.flow.attr[co.TCP_COMPLETE] = True\n connection.flow.attr[co.SADDR] = co.long_ipv6_address(info[0])\n connection.flow.attr[co.DADDR] = co.long_ipv6_address(info[14])\n connection.flow.attr[co.SPORT] = info[1]\n connection.flow.attr[co.DPORT] = info[15]\n connection.flow.detect_ipv4()\n connection.flow.indicates_wifi_or_cell()\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.START] = timedelta(seconds=float(info[28])/1000)\n connection.flow.attr[co.DURATION] = float(info[30]) / 1000.0\n connection.flow.attr[co.C2S][co.PACKS] = int(info[2])\n connection.flow.attr[co.S2C][co.PACKS] = int(info[16])\n # Note that this count is about unique data bytes (sent in the payload)\n 
connection.flow.attr[co.C2S][co.BYTES] = int(info[6])\n connection.flow.attr[co.S2C][co.BYTES] = int(info[20])\n # This is about actual data bytes (sent in the payload, including retransmissions)\n connection.flow.attr[co.C2S][co.BYTES_DATA] = int(info[8])\n connection.flow.attr[co.S2C][co.BYTES_DATA] = int(info[22])\n\n connection.flow.attr[co.C2S][co.PACKS_RETRANS] = int(info[9])\n connection.flow.attr[co.S2C][co.PACKS_RETRANS] = int(info[23])\n connection.flow.attr[co.C2S][co.BYTES_RETRANS] = int(info[10])\n connection.flow.attr[co.S2C][co.BYTES_RETRANS] = int(info[24])\n\n connection.flow.attr[co.C2S][co.PACKS_OOO] = int(info[11])\n connection.flow.attr[co.S2C][co.PACKS_OOO] = int(info[25])\n\n connection.flow.attr[co.C2S][co.NB_SYN] = int(info[12])\n connection.flow.attr[co.S2C][co.NB_SYN] = int(info[26])\n connection.flow.attr[co.C2S][co.NB_FIN] = int(info[13])\n connection.flow.attr[co.S2C][co.NB_FIN] = int(info[27])\n connection.flow.attr[co.C2S][co.NB_RST] = int(info[3])\n connection.flow.attr[co.S2C][co.NB_RST] = int(info[17])\n connection.flow.attr[co.C2S][co.NB_ACK] = int(info[4])\n connection.flow.attr[co.S2C][co.NB_ACK] = int(info[18])\n\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.C2S][co.TIME_FIRST_PAYLD] = float(info[31]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_PAYLD] = float(info[32]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD] = float(info[33]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD] = float(info[34]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_FIRST_ACK] = float(info[35]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_ACK] = float(info[36]) / 1000.0\n\n connection.flow.attr[co.C2S][co.RTT_SAMPLES] = int(info[48])\n connection.flow.attr[co.S2C][co.RTT_SAMPLES] = int(info[55])\n connection.flow.attr[co.C2S][co.RTT_MIN] = float(info[45])\n connection.flow.attr[co.S2C][co.RTT_MIN] = float(info[52])\n connection.flow.attr[co.C2S][co.RTT_MAX] = float(info[46])\n connection.flow.attr[co.S2C][co.RTT_MAX] = float(info[53])\n connection.flow.attr[co.C2S][co.RTT_AVG] = float(info[44])\n connection.flow.attr[co.S2C][co.RTT_AVG] = float(info[51])\n connection.flow.attr[co.C2S][co.RTT_STDEV] = float(info[47])\n connection.flow.attr[co.S2C][co.RTT_STDEV] = float(info[54])\n connection.flow.attr[co.C2S][co.TTL_MIN] = float(info[49])\n connection.flow.attr[co.S2C][co.TTL_MIN] = float(info[56])\n connection.flow.attr[co.C2S][co.TTL_MAX] = float(info[50])\n connection.flow.attr[co.S2C][co.TTL_MAX] = float(info[57])\n\n connection.flow.attr[co.C2S][co.SS_MIN] = int(info[71])\n connection.flow.attr[co.S2C][co.SS_MIN] = int(info[94])\n connection.flow.attr[co.C2S][co.SS_MAX] = int(info[70])\n connection.flow.attr[co.S2C][co.SS_MAX] = int(info[93])\n\n connection.flow.attr[co.C2S][co.CWIN_MIN] = int(info[76])\n connection.flow.attr[co.S2C][co.CWIN_MIN] = int(info[99])\n connection.flow.attr[co.C2S][co.CWIN_MAX] = int(info[75])\n connection.flow.attr[co.S2C][co.CWIN_MAX] = int(info[98])\n\n connection.flow.attr[co.C2S][co.NB_RTX_RTO] = int(info[78])\n connection.flow.attr[co.S2C][co.NB_RTX_RTO] = int(info[101])\n connection.flow.attr[co.C2S][co.NB_RTX_FR] = int(info[79])\n connection.flow.attr[co.S2C][co.NB_RTX_FR] = int(info[102])\n connection.flow.attr[co.C2S][co.NB_REORDERING] = int(info[80])\n connection.flow.attr[co.S2C][co.NB_REORDERING] = int(info[103])\n connection.flow.attr[co.C2S][co.NB_NET_DUP] = int(info[81])\n connection.flow.attr[co.S2C][co.NB_NET_DUP] = 
int(info[104])\n connection.flow.attr[co.C2S][co.NB_UNKNOWN] = int(info[82])\n connection.flow.attr[co.S2C][co.NB_UNKNOWN] = int(info[105])\n connection.flow.attr[co.C2S][co.NB_FLOW_CONTROL] = int(info[83])\n connection.flow.attr[co.S2C][co.NB_FLOW_CONTROL] = int(info[106])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_RTO] = int(info[84])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_RTO] = int(info[107])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_FR] = int(info[85])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_FR] = int(info[108])\n\n connection.attr[co.C2S][co.BYTES] = {}\n connection.attr[co.S2C][co.BYTES] = {}\n\n connection.flow.attr[co.C2S][co.TIMESTAMP_RETRANS] = []\n connection.flow.attr[co.S2C][co.TIMESTAMP_RETRANS] = []\n\n connection.flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n\n connections[conn_id] = connection\n\n log_file.close()\n return connections, conn_id", "def _show_general_stats(self):\n\n stat = YuStats()\n template_filename = self._get_config_template('stats')\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n number_of_links=format_none(stat.links_all),\n number_of_redirects=format_none(stat.redirect_all),\n number_of_redirects_today=format_none(stat.redirect_today),\n number_of_redirects_this_week=format_none(stat.redirect_this_week),\n number_of_redirects_this_month=format_none(stat.redirect_this_month),\n number_of_redirects_this_year=format_none(stat.redirect_this_year),\n number_of_url_today=format_none(stat.links_today),\n number_of_url_this_week=format_none(stat.links_this_week),\n number_of_url_this_month=format_none(stat.links_this_month),\n number_of_url_this_year=format_none(stat.links_this_year),\n date_of_first_redirect=format_none(stat.date_of_first_redirect),\n )\n if text:\n self._send_head(text, 200)\n if not self._header_only:\n try:\n self.wfile.write(text)\n except socket.error:\n # clients like to stop reading after they got a 404\n pass\n else:\n self._send_internal_server_error()", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n 
self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def collect_statistics(self, stat_col, data_streams):\n self.module.collect_statistics(stat_col, data_streams)", "def get_stats():\n logger.info(\"Retrieving stats\")\n # create datetime iso format zero hour offset\n current_datetime = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n # if filename doesn't exist\n if not path.exists(filename):\n return \"Statistics do not exist\", 404\n\n # get current stats\n with open(filename, 'r') as f:\n currentstats = json.loads(f.read())\n\n # return json\n stats_obj = {}\n stats_obj[\"num_users\"] = currentstats[\"num_users\"]\n stats_obj[\"num_facts\"] = currentstats[\"num_facts\"]\n stats_obj[\"most_popular_tag\"] = currentstats[\"most_popular_tag\"]\n # stats_obj[\"avg_jokes_added_weekly\"] = currentstats[\"avg_jokes_added_weekly\"]\n stats_obj[\"num_subscribed_users\"] = currentstats[\"num_subscribed_users\"]\n stats_obj[\"datetime\"] = current_datetime\n\n logger.debug(stats_obj)\n logger.info(\"Returning stats\")\n return stats_obj, 200", "def get_self_stats(stat_path):\n index_utime = 13\n index_stime = 14\n index_cutime = 15\n index_cstime = 16\n index_vsize = 22\n index_rss = 23\n self_stats = {'utime': 0, 'stime': 0, 'vsize': 0, 'rss': 0}\n\n if not os.path.exists(stat_path):\n collectd.error('mlab: get_self_stats stat path does not exist: %s' %\n stat_path)\n return {}\n\n with open(stat_path, 'r') as stat_file:\n stat_fields = stat_file.read().strip().split()\n\n if len(stat_fields) < 24:\n collectd.error('mlab: get_self_stats found only %s fields.' 
%\n len(stat_fields))\n return {}\n\n self_stats['utime'] = (\n float(stat_fields[index_utime]) + float(stat_fields[index_cutime]))\n self_stats['stime'] = (\n float(stat_fields[index_stime]) + float(stat_fields[index_cstime]))\n self_stats['vsize'] = int(stat_fields[index_vsize])\n self_stats['rss'] = int(stat_fields[index_rss]) * _PAGESIZE\n return self_stats", "def get_stats(self):\n\t\n\tceph_cluster = \"%s-%s\" % (self.prefix, self.cluster)\n\n\tdata = { ceph_cluster: { } }\n\tadmin_folder=\"/var/run/ceph/\"\n\tif(os.path.isdir(admin_folder)):\n\t\tfiles=os.walk(admin_folder).next()[2]\n else:\n\t\tprint \"No folder exists \"+admin_folder\n\t\treturn -1\n\tabs_path=[admin_folder+x for x in files]\n\tadmin_socket = max(abs_path, key=os.path.getmtime)\n\tcmd = \"ceph --admin-daemon \"+admin_socket +\" perf dump -f json\"\n\ttry:\n\t\toutput = subprocess.check_output(cmd, shell=True)\n\texcept Exception as exc:\n\t\tcollectd.error(\"ceph-osd: failed to ceph osd perf dump :: %s :: %s\" % (exc, traceback.format_exc()))\n\t\treturn\n\n\tif output is None:\n\t\tcollectd.error('ceph-osd: failed to ceph osd perf dump :: output was None')\n\n\tjson_data = json.loads(output)\n\tmatch=(re.search(r'([\\w.-]+)(\\d)([\\w.-]+)',admin_socket))\n\tif match:\n\t\tosd_id=match.group(2)\n\telse:\n\t\treturn\n\tdata[ceph_cluster][osd_id]={}\n\tdata[ceph_cluster][osd_id]['op_latency']={}\n\tdata[ceph_cluster][osd_id]['op_w_latency']={}\n\tdata[ceph_cluster][osd_id]['op_r_latency']={}\n\tdata[ceph_cluster][osd_id]['op_latency']['sum']=json_data['osd']['op_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_latency']['avgcount']=json_data['osd']['op_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['sum']=json_data['osd']['op_w_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_w_latency']['avgcount']=json_data['osd']['op_w_latency']['avgcount']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['sum']=json_data['osd']['op_r_latency']['sum']\n\tdata[ceph_cluster][osd_id]['op_r_latency']['avgcount']=json_data['osd']['op_r_latency']['avgcount']\n\n\t#print data\t\n\treturn data", "def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in 
self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result", "def _flow_stats_reply_handler(self, ev):\n body = ev.msg.body\n dpid = ev.msg.datapath.id\n self.stats['flow'][dpid] = body\n self.flow_stats.setdefault(dpid, {})\n self.flow_speed.setdefault(dpid, {})\n for stat in sorted([flow for flow in body if flow.priority == 1],\n key=lambda flow: (flow.match.get('in_port'),\n flow.match.get('ipv4_dst'))):\n # print(stat)\n key = (stat.match['in_port'], stat.match.get('ipv4_dst'),\n stat.instructions[0].actions[-1].port)\n value = (stat.packet_count, stat.byte_count,\n stat.duration_sec, stat.duration_nsec)\n self._save_stats(self.flow_stats[dpid], key, value, 5)\n self.link_loss[dpid][(stat.match.get('ipv4_src'),stat.match.get('ipv4_dst'))] = stat.packet_count\n # Get flow's speed.\n pre = 0\n period = setting.MONITOR_PERIOD\n tmp = self.flow_stats[dpid][key]\n if len(tmp) > 1:\n pre = tmp[-2][1]\n period = self._get_period(tmp[-1][2], tmp[-1][3],\n tmp[-2][2], tmp[-2][3])\n\n speed = self._get_speed(self.flow_stats[dpid][key][-1][1],\n pre, period)\n\n self._save_stats(self.flow_speed[dpid], key, speed, 5)", "def file_stat(self, file_path):", "def samtools_stats(filename):\n stats, err = Popen([\"samtools\",\"stats\",filename], stdout=PIPE, stderr=PIPE).communicate()\n if err != \"\":\n raise Exception(err)\n stats = [x.split(\"\\t\") for x in stats.split(\"\\n\")]\n chksum = [x for x in stats if x[0].startswith(\"CHK\")][0]\n stats = dict([(x[1].replace(\":\",\"\"),set_type(x[2]),) for x in stats if x[0].startswith(\"SN\")])\n stats[\"filename\"] = filename\n stats[\"chksum_read_names\"] = chksum[1]\n stats[\"chksum_sequences\"] = chksum[2]\n stats[\"chksum_qualities\"] = chksum[3]\n return stats", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def stats(self):\n pass", "def read_metrics(self):\n raise NotImplementedError()", "def collect(self):\n self.status['serial'] = self.config.get('dlmconfig', 'serial')\n self.status['timestamp'] = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime())\n self.status['uptime'] = system.stats.uptime()\n self.status['free_disk_space_sdcard'] = system.stats.disk_usage('root')\n self.status['free_disk_space_stick'] = system.stats.disk_usage('sda1')\n self.status['wwan_reception'] = 
system.interfaces.WwanInterface.signal_strength(self.config.get('network', 'iface'))", "def read_collector_config(cfg_file):\n hpifreqs = []\n linefreq = None\n if op.isfile(cfg_file):\n with open(cfg_file, 'r') as f:\n flines = f.read().splitlines()\n for line in flines:\n lit = line.split()\n if len(lit) > 1:\n if lit[0].find('hpiFreq') == 0:\n hpifreqs.append(float(lit[1]))\n elif lit[0].find('lineFreq') == 0:\n linefreq = float(lit[1])\n return linefreq, hpifreqs", "def read():\n for host in _hosts:\n remaining = ssl_valid_time_remaining(host)\n remaining = remaining.total_seconds()\n remaining = int(remaining)\n\n collectd.info(\n 'tls-cert-monitor(host=%s): Reading data (data=%d)' %\n (host, remaining))\n\n val = collectd.Values(type='gauge', type_instance=host)\n\n val.plugin = 'tls-cert-monitor'\n val.dispatch(values=[remaining])", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def update_link_statistics(self):\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.BUFFEROCCUPANCY\n globals.statistics[key][globals.systime] = self.buffersize", "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if SENDING in line:\n self._req_set.add(self._get_request(line, True))\n line = file.readline()\n except Exception as err:\n print(\"Failed to read garbage collector log. 
Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "async def skribbl_get_stats(self) -> int:\r\n return await self.read(self._skribbl_get_stats)", "def doCollectTask(filename, topK):\n f = open(filename)\n dataDict = json.load(f)\n weirdInCollect = Counter()\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n # Check direction first to get the inner server.\n srcIP = dataDict[key][\"addr\"][0]\n dstIP = dataDict[key][\"addr\"][2]\n for _ in dataDict[key][\"weird\"]:\n if srcIP.startswith(\"136.159.\"):\n # Which means srcIP is within our campus. it should be an outbound traffic\n weirdInCollect[getIPCluster(srcIP)] += 1\n else:\n weirdInCollect[getIPCluster(dstIP)] += 1\n\n return Counter(dict(weirdInCollect.most_common(topK)))", "def getServerStats():\n return _xmlUrlToDict(serverString + \"/rest/stats\", int)", "def doCollectTask(filename, topK):\n f = open(filename)\n dataDict = json.load(f)\n weirdOutCollect = Counter()\n for key in dataDict:\n if dataDict[key][\"weird\"]:\n # Check direction first to get the inner server.\n srcIP = dataDict[key][\"addr\"][0]\n dstIP = dataDict[key][\"addr\"][2]\n if srcIP.startswith(\"136.159.\"):\n # Which means srcIP is within our campus. it should be an outbound traffic\n weirdOutCollect[getIPCluster(dstIP)] += 1\n else:\n weirdOutCollect[getIPCluster(srcIP)] += 1\n\n return Counter(dict(weirdOutCollect.most_common(topK)))", "def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1", "def get_stats(self):\n return {\n \"pings_sent\" : self.ping_count,\n \"measurements\" : self.measurements,\n }", "def main():\n SOCKET = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n SOCKET.bind(settings.STASH_ADDRESS)\n SOCKET.listen(1)\n\n while 1:\n connection, address = SOCKET.accept()\n command, key, value = pickle.loads(connection.recv(4096).decode())\n\n if command == 'STATS':\n response = handle_stats()\n elif command in ('GET', 'INCREMENT', 'DELETE'):\n response = COMMAND_HANDLERS[command](key)\n elif command in ('PUT', 'APPEND'):\n response = COMMAND_HANDLERS[command](key, value)\n else:\n response = (False, 'Unknown command type [{}]'.format(command))\n\n update_stats(command, response[0])\n connection.sendall(pickle.dumps(response))\n connection.close()\n\n SOCKET.shutdown(socket.SHUT_RDWR)\n SOCKET.close()", "def get_user_statistics(self, jid):\n self.data[jid] = {}\n\n iq = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS)\n sessionid = iq['command']['sessionid']\n\n form = self.xmpp.plugin['xep_0004'].make_form(ftype='submit')\n field = form.add_field(\n ftype='hidden',\n type='hidden',\n var='FORM_TYPE',\n value=ADMIN)\n field['type'] = 'hidden'\n form.add_field(var='accountjid', value=jid)\n\n result = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS,\n sessionid=sessionid,\n payload=form)\n fields = result['command']['form']['fields']\n\n for field in fields.values():\n if field['type'] != 'hidden':\n if field['var'] == 
'onlineresources':\n value = field['value'].split('\\n')\n elif field['var'] == 'ipaddresses':\n value = []\n for ip in field['value'].split('\\n'):\n lookup = ip_lookup(ip)\n if not lookup:\n lookup = 'Unknown'\n value.append((ip, lookup))\n else:\n value = field['value']\n self.data[jid][field['var']] = value", "def get_total_and_retrans_frames(pcap_filepath, connections):\n # First init values to avoid strange errors if connection is empty\n for conn_id, conn in connections.iteritems():\n for direction in co.DIRECTIONS:\n connections[conn_id].flow.attr[direction][co.FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_TOTAL] = 0\n connections[conn_id].flow.attr[direction][co.FRAMES_RETRANS] = 0\n connections[conn_id].flow.attr[direction][co.BYTES_FRAMES_RETRANS] = 0\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_total\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats(None, pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n # Manage case with ipv6\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_TOTAL] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_TOTAL] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_TOTAL] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_TOTAL] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)\n\n stats_filename = os.path.basename(pcap_filepath)[:-5] + \"_tshark_retrans\"\n stats_file = open(stats_filename, 'w')\n co.tshark_stats('tcp.analysis.retransmission', pcap_filepath, print_out=stats_file)\n stats_file.close()\n\n stats_file = open(stats_filename)\n data = stats_file.readlines()\n stats_file.close()\n for line in data:\n split_line = \" \".join(line.split()).split(\" \")\n if len(split_line) == 11:\n ip_src, port_src = get_ip_port_tshark(split_line[0])\n ip_dst, port_dst = get_ip_port_tshark(split_line[2])\n for conn_id, conn in connections.iteritems():\n if conn.flow.attr[co.SADDR] == ip_src and conn.flow.attr[co.SPORT] == port_src and \\\n conn.flow.attr[co.DADDR] == ip_dst and conn.flow.attr[co.DPORT]:\n connections[conn_id].flow.attr[co.S2C][co.FRAMES_RETRANS] = int(split_line[3])\n connections[conn_id].flow.attr[co.S2C][co.BYTES_FRAMES_RETRANS] = int(split_line[4])\n connections[conn_id].flow.attr[co.C2S][co.FRAMES_RETRANS] = int(split_line[5])\n connections[conn_id].flow.attr[co.C2S][co.BYTES_FRAMES_RETRANS] = int(split_line[6])\n break\n\n stats_file.close()\n os.remove(stats_filename)", "def stats(self):", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in 
CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def parse_stats(logfile):\n\n stats = open(logfile)\n\n block_marker = re.compile(r'^# stats: dump at (\\d+)')\n\n stats_marker = re.compile(r'^stats: (\\w+) (.+)$')\n\n start_time = None\n block_time = None\n\n data = {}\n\n for ln in stats:\n # Construct a more compact time-series\n mark = block_marker.match(ln)\n if mark:\n if not start_time:\n start_time = int(mark.groups()[0])\n\n block_time = int(mark.groups()[0]) - start_time\n\n elif stats_marker.match(ln):\n stats = stats_marker.match(ln).groups()\n stat_name = stats[0]\n stat_data = []\n\n for num in stats[1].split():\n if '.' in num:\n stat_data.append(float(num))\n else:\n stat_data.append(int(num))\n \n if not stat_name in data:\n data[stat_name] = []\n data[stat_name].append((block_time, stat_data))\n\n json_data = {\n 'start_time': start_time, \n 'data': data\n }\n\n return json.dumps(json_data)", "def __call__(self, subserver, stats, sock, ip, port):\n try:\n while True:\n data = sock.recv(self.chunk_read)\n if not data:\n break\n stats.set('%s.byte_count' % self.name,\n stats.get('%s.byte_count' % self.name) + len(data))\n while data:\n i = sock.send(data)\n data = data[i:]\n finally:\n subserver.logger.notice('served request from %s:%s' % (ip, port))\n sock.close()", "def _parse(self):\n socket_paths = []\n cert_paths = []\n for conf_file in self.conf_files:\n # Get relevant lines from all config files.\n relevant_lines = self._parse_relevant_lines(conf_file)\n # Parse all sockets from the relevant lines.\n socket_paths.append(\n self._parse_haproxy_sockets(relevant_lines['stats'])\n )\n # Find out if a crt-base is set. `crt` directives depend on that\n # value so we need to find it first. 
We assume crt-base can only be\n # set once.\n cert_base = self._parse_haproxy_cert_base(\n relevant_lines['crt-base']\n )\n cert_paths.append(\n self._parse_haproxy_cert_paths(\n relevant_lines['crt'],\n cert_base\n )\n )\n return (cert_paths, socket_paths)", "def get_proc_stats(proc):\n file_size = os.path.getsize(proc['filename'])\n return {\n 'file_size': file_size,\n 'formatted_file_size': size(file_size),\n 'started_at': time.strftime(\n \"%H:%M\", time.localtime(proc['time'])),\n 'recording_time': str(\n timedelta(seconds=int(time.time()) - proc['time']))\n }", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def parse_metrics_file(self) -> Dict[int, dict]:\n LOG.info(\"Parsing Dragen demultiplexing adapter metrics file %s\", self.adapter_metrics_path)\n parsed_metrics = {}\n\n with self.adapter_metrics_path.open(\"r\") as metrics_file:\n metrics_reader = csv.DictReader(metrics_file)\n for row in metrics_reader:\n lane = int(row[\"Lane\"])\n read_number = row[\"ReadNumber\"]\n sample_id = row[\"Sample_ID\"]\n parsed_metrics[lane] = parsed_metrics.get(lane, {})\n parsed_metrics[lane][(read_number, sample_id)] = row\n\n return self.summerize_adapter_metrics(parsed_metrics=parsed_metrics)", "def _analyzeFile(self, filename):\n date = os.path.basename(filename)[:10]\n if filename.endswith('gz'):\n f = gzip.open(filename)\n else:\n f = open(filename)\n lines = f.read().splitlines()\n for line in lines:\n if re.search('joined the game', line):\n self._analyzeLine(line, date, self._start_times)\n elif re.search('left the game', line) or re.search('lost connection',\n line):\n self._analyzeLine(line, date, self._end_times)\n elif re.search('Stopping server', line):\n self._server_stop_times.append(ConvertTime(date, line))", "def calculate_thresholds(self):\n try:\n f = open('resources/server_stats.txt', 'r')\n except IOError:\n print(\"server_stats.txt does not exist please run ddosw_baseline\")\n sys.exit()\n\n # extract the value rom server_stats.txt\n stats = f.readlines()\n f.close()\n raw_stats = list()\n for line in stats:\n stats = line.split()\n raw_stats.append(stats[2])\n\n thresholds = dict()\n\n # set the orange threshold at 50% higher than the previously recorded maximum cpu value\n # set the red threshold at 75% higher than the previously recorded maximum cpu value\n if float(raw_stats[1]) < 57:\n thresholds['orange_cpu_threshold'] = float(raw_stats[1]) * 1.5\n thresholds['red_cpu_threshold'] = float(raw_stats[1]) * 1.75\n else: # ensure the threshold cannot go above 100%\n thresholds['orange_cpu_threshold'] = 85\n thresholds['red_cpu_threshold'] = 95\n\n return thresholds", "def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)", "def stats_conf(cls, name, parsed_conf):\n return [('%s.byte_count' % 
name, 'sum')]", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def usagestats_parse(dirpath):\r\n # Create database\r\n # TODO: change to an easier format, probably json.\r\n db, cursor = create_table()\r\n\r\n # Some vars for logging\r\n processed = 0\r\n err = 0\r\n\r\n # Iterate through the /usagestats/ directory and fetch all files\r\n for root, dirnames, filenames in os.walk(dirpath, topdown=True, onerror=None, followlinks=False):\r\n if 'daily' in root or 'weekly' in root or 'monthly' in root or 'yearly' in root:\r\n # Retrieve the folder name to save what the frequency of the usagestats were:\r\n frequency = root.split('/')[-1]\r\n for filename in filenames:\r\n # Check if filename is only numbers (which is an epoch time representation)\r\n if filename.isnumeric():\r\n try:\r\n tree = ET.parse(os.path.join(root, filename))\r\n except ET.ParseError:\r\n parse_file_with_protobuf(os.path.join(root, filename), db)\r\n continue\r\n\r\n # We have sucessfully parsed the usagestats xml.\r\n # So continue processing\r\n tree_root = tree.getroot()\r\n\r\n for elem in tree_root:\r\n parse_sub_elements(frequency, elem, filename, db)\r\n\r\n # query for reporting\r\n cursor.execute('''\r\n select \r\n usage_type,\r\n datetime(lastime/1000, 'UNIXEPOCH', 'localtime') as lasttimeactive,\r\n timeactive as time_Active_in_msecs,\r\n timeactive/1000 as timeactive_in_secs,\r\n case last_time_service_used WHEN '' THEN ''\r\n ELSE datetime(last_time_service_used/1000, 'UNIXEPOCH', 'localtime')\r\n end last_time_service_used,\r\n case last_time_visible WHEN '' THEN ''\r\n ELSE datetime(last_time_visible/1000, 'UNIXEPOCH', 'localtime') \r\n end last_time_visible,\r\n total_time_visible,\r\n app_launch_count,\r\n package,\r\n CASE types\r\n WHEN '1' THEN 'MOVE_TO_FOREGROUND'\r\n WHEN '2' THEN 'MOVE_TO_BACKGROUND'\r\n WHEN '5' THEN 'CONFIGURATION_CHANGE'\r\n WHEN '7' THEN 'USER_INTERACTION'\r\n WHEN '8' THEN 'SHORTCUT_INVOCATION'\r\n ELSE types\r\n END types,\r\n classs,\r\n source,\r\n fullatt\r\n from data\r\n order by lasttimeactive DESC\r\n ''')\r\n all_rows = cursor.fetchall()\r\n\r\n # HTML report section\r\n h = open('./Report.html', 'w')\r\n h.write('<html><body>')\r\n h.write('<h2>Android Usagestats report (Dates are localtime!)</h2>')\r\n h.write('<style> table, th, td {border: 1px solid black; border-collapse: collapse;}</style>')\r\n h.write('<br />')\r\n\r\n # HTML headers\r\n h.write('<table>')\r\n h.write('<tr>')\r\n h.write('<th>Usage Type</th>')\r\n h.write('<th>Last Time Active</th>')\r\n h.write('<th>Time Active in Msecs</th>')\r\n h.write('<th>Time Active in Secs</th>')\r\n h.write('<th>Last Time Service Used</th>')\r\n h.write('<th>Last Time Visible</th>')\r\n h.write('<th>Total Time Visible</th>')\r\n h.write('<th>App Launch Count</th>')\r\n h.write('<th>Package</th>')\r\n h.write('<th>Types</th>')\r\n h.write('<th>Class</th>')\r\n h.write('<th>Source</th>')\r\n h.write('</tr>')\r\n\r\n for row in all_rows:\r\n usage_type = row[0]\r\n lasttimeactive = row[1]\r\n time_Active_in_msecs = row[2]\r\n timeactive_in_secs = row[3]\r\n last_time_service_used = row[4]\r\n last_time_visible = row[5]\r\n total_time_visible = row[6]\r\n app_launch_count = row[7]\r\n package = row[8]\r\n types = row[9]\r\n classs = row[10]\r\n source = row[11]\r\n\r\n processed = processed + 1\r\n # report data\r\n h.write('<tr>')\r\n h.write('<td>' + str(usage_type) + '</td>')\r\n h.write('<td>' + str(lasttimeactive) + '</td>')\r\n h.write('<td>' + 
str(time_Active_in_msecs) + '</td>')\r\n h.write('<td>' + str(timeactive_in_secs) + '</td>')\r\n h.write('<td>' + str(last_time_service_used) + '</td>')\r\n h.write('<td>' + str(last_time_visible) + '</td>')\r\n h.write('<td>' + str(total_time_visible) + '</td>')\r\n h.write('<td>' + str(app_launch_count) + '</td>')\r\n h.write('<td>' + str(package) + '</td>')\r\n h.write('<td>' + str(types) + '</td>')\r\n h.write('<td>' + str(classs) + '</td>')\r\n h.write('<td>' + str(source) + '</td>')\r\n h.write('</tr>')\r\n\r\n # HTML footer\r\n h.write('<table>')\r\n h.write('<br />')\r\n\r\n print('')\r\n print('Records processed: ' + str(processed))\r\n print('Triage report completed. See Reports.html.')", "def stat_file(self, path, info):\n return {}", "def _port_stats_reply_handler(self, ev):\n body = ev.msg.body\n dpid = ev.msg.datapath.id\n self.stats['port'][dpid] = body\n self.free_bandwidth.setdefault(dpid, {})\n\n for stat in sorted(body, key=attrgetter('port_no')):\n # self.link_loss[dpid][stat.port_no] = [stat.rx_packets,stat.tx_packets]\n port_no = stat.port_no\n if port_no != ofproto_v1_3.OFPP_LOCAL:\n key = (dpid, port_no)\n value = (stat.tx_bytes, stat.rx_bytes, stat.rx_errors,\n stat.duration_sec, stat.duration_nsec)\n\n self._save_stats(self.port_stats, key, value, 5)\n\n # Get port speed.\n pre = 0\n period = setting.MONITOR_PERIOD\n tmp = self.port_stats[key]\n if len(tmp) > 1:\n pre = tmp[-2][0] + tmp[-2][1]\n period = self._get_period(tmp[-1][3], tmp[-1][4],\n tmp[-2][3], tmp[-2][4])\n\n speed = self._get_speed(\n self.port_stats[key][-1][0] + self.port_stats[key][-1][1],\n pre, period)\n\n self._save_stats(self.port_speed, key, speed, 5)\n self._save_freebandwidth(dpid, port_no, speed)", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def compute_stats(self):\n from vmc.common.oal import osobj\n d = osobj.get_iface_stats()\n d.addCallback(self.update_stats)", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def stats_process():\n nonlocal d_stats, b_status\n log = slog()\n d_stats = self.stats_compute()\n if self.toConsole() or self.args['duf'] or self.args['du']:\n self.dp.qprint(d_stats['report'], level = self.debugLevel)\n slog_filter = filters_show()\n log.title_set('Size statistics')\n if self.args['table3D']: log.render3D()\n log('Total size (raw): %d\\n' % d_stats['totalSize'] )\n log('Total size (friendly): {:,}\\n'.format(d_stats['totalSize']) )\n log('Total size (human): %s\\n' % d_stats['totalSize_human'] )\n log('Total files: %s\\n' % d_stats['files'] )\n log('Total dirs: %s\\n' % d_stats['dirs'] )\n log('Total runtime: %5.3f s' % other.toc() )\n b_status = b_status and d_stats['status']\n return {\n 'status': b_status,\n 'filterLog': slog_filter,\n 'bodyLog': log\n }", "def update_status(self):\n\n # Memory information can be found in status and 
statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def get_statistics():\n logger.info(\"Started request\")\n if os.path.exists(app_config['datastore']['filename']):\n with open(app_config['datastore']['filename']) as f:\n data = json.loads(f.read())\n\n logging.debug(\"Request data: {}\".format(data))\n logging.info(\"Request completed\")\n\n return data, 200\n else:\n logger.error(\"File not found\")\n return 404", "def get(self, *args, **kwargs):\n output = self._base_stats()\n output['connections'] = dict()\n for key in self.application.rabbitmq.keys():\n output['connections'][key] = self.application.rabbitmq[key].stats\n self.write(output)", "def get_stats(ns_profnum, clear=False, **kwargs):\n global SLOCK, STATS\n SLOCK.acquire()\n st = STATS\n if clear:\n STATS['ntotal'] = 0\n STATS['rtotal'] = 0\n STATS['oktotal'] = 0\n STATS['ertotal'] = 0\n STATS['ettotal'] = 0.0\n STATS['ethigh'] = 0.0\n STATS['etlow'] = 0.0\n SLOCK.release()\n #_LOGGER.info('get_stats(): %d %f %d', st['ntotal'], st['ettotal'], st['rtotal'])\n return st", "def update_server_stats(self):\n try:\n aio.run(self.client.execute, 'ANALYZE')\n except Exception:\n pass # swallow; CrateDB 4.1.0+ is required to run ANALYZE", "def statsWorker():\n logger.info('STATS: Starting. 
Will report out every {0:.1g} hours'.format(\n config.STATS_HOURS))\n while True:\n gevent.sleep(timedelta(hours=config.STATS_HOURS).total_seconds())\n logger.info('STATS: {0}'.format(stats))\n stats.resetStats()\n\n return", "def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.SERVER_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def _base_stats(self):\n usage = resource.getrusage(resource.RUSAGE_SELF)\n return {'host': self.application.host,\n 'port': self.application.port,\n 'requests': self.application.counters,\n 'timestamp': int(time.time()),\n 'block': {'input': usage.ru_inblock,\n 'output': usage.ru_oublock},\n 'context_switches': usage.ru_nvcsw + usage.ru_nivcsw,\n 'cpu_time': {'user': usage.ru_utime,\n 'system': usage.ru_stime},\n 'memory_usage': usage.ru_maxrss,\n 'page_faults': {'minor': usage.ru_minflt,\n 'major': usage.ru_majflt},\n 'page_size': resource.getpagesize(),\n 'signals_received': usage.ru_nsignals,\n 'swap_outs': usage.ru_nswap}", "def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. 
Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def stats(self):\n\n res = self.read_block(REG_STATS, 9)\n\n ret = {\n \"completed_cycles\": (res[1] << 8) + (res[0] << 0),\n \"last_boot\": {\n \"retries\": res[2],\n \"duration\": (res[6] << 24) + (res[5] << 16) + (res[4] << 8) + (res[3] << 0)\n },\n \"forced_shutdowns\": (res[8] << 8) + (res[7] << 0)\n }\n\n return ret", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def stat(**kwargs):\n print(\"output stats\")", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "async def write_metrics(every: int, to: str):\n while True:\n line = f\"pyvast-threatbus,host={socket.gethostname()} \"\n start_length = 
len(line)\n for m in metrics:\n if not m.is_set:\n continue\n if type(m) is Gauge or type(m) is InfiniteGauge:\n if len(line) > start_length:\n line += \",\"\n line += f\"{m.name}={m.value}\"\n if type(m) is Summary:\n if len(line) > start_length:\n line += \",\"\n line += (\n f\"{m.name}_min={m.min},{m.name}_max={m.max},{m.name}_avg={m.avg}\"\n )\n m.reset()\n\n if len(line) > start_length:\n # only update the file if there were metrics collected.\n line += f\" {time.time_ns()}\" # append current nanoseconds ts\n with open(to, \"a\") as f:\n f.write(line + \"\\n\")\n await asyncio.sleep(every)", "def parse(filename):\n data = {}\n for line in reversed(list(open(filename))):\n date, time, ip, source = line.strip().split()\n log_time = datetime.datetime.strptime(date +\" \"+time, '%Y-%m-%d %H:%M:%S')\n diff = datetime.datetime.now() - log_time\n if diff.seconds > 600:\n break\n if ip not in data:\n data[ip] = set()\n data[ip].add(source)\n return data" ]
[ "0.6393384", "0.6306739", "0.60562605", "0.6037875", "0.597491", "0.59416795", "0.5933875", "0.5916901", "0.57845867", "0.577276", "0.5749845", "0.571024", "0.5700701", "0.56980515", "0.5637241", "0.5629849", "0.56119055", "0.55269897", "0.5518267", "0.55144465", "0.5510193", "0.5505477", "0.54930115", "0.5482835", "0.5477913", "0.54672444", "0.5464341", "0.54504126", "0.543767", "0.542593", "0.54002225", "0.5395835", "0.5394207", "0.53784513", "0.53719324", "0.5371375", "0.537043", "0.5341618", "0.5331093", "0.53308815", "0.53250813", "0.53141654", "0.5311003", "0.5300046", "0.5280523", "0.5268669", "0.5251246", "0.52273476", "0.5223584", "0.5220291", "0.52139705", "0.52105844", "0.5201081", "0.5199306", "0.51947874", "0.51918614", "0.51900744", "0.51895905", "0.51826435", "0.51806414", "0.5174816", "0.51676214", "0.5164008", "0.5159872", "0.515345", "0.51490736", "0.514752", "0.5145388", "0.51445365", "0.5144135", "0.5141903", "0.5141577", "0.5137326", "0.5133575", "0.51309574", "0.51301134", "0.51293266", "0.5114674", "0.5113953", "0.51122224", "0.5104178", "0.51036096", "0.51028454", "0.5082293", "0.50821495", "0.5077082", "0.5077037", "0.50683415", "0.5067934", "0.50578266", "0.5056356", "0.5051237", "0.50472265", "0.50455093", "0.5043777", "0.5043227", "0.50414515", "0.5040936", "0.50368184", "0.5031512" ]
0.71067053
0
Collects the metrics from the gathers
def collect(self):
    collector = {}

    for gather in self.gathers:
        try:
            stats = gather.run_single_cycle(collector=collector)

            if stats:
                collector.update(stats)
        except Exception as ex:
            self._logger.exception(
                "Exception while collecting metrics for PID: %s of type: %s. Details: %s",
                self.pid,
                type(gather),
                repr(ex),
            )

    return collector
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def compute_metrics(self):\n pass", "def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n 
self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter", "def metrics_group():", "def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def compute_metrics(self, results: list) -> dict:", "def run(self):\r\n self.collect_data()", "def collect(self, model):\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n for var, reporter in self.agent_reporters.items():\n agent_records = []\n agent_records.append((model.timer.unique_id, reporter(model.timer)))\n self.agent_vars[var].append(agent_records)", "def collect_metrics(application):\n\n try:\n subprocess.check_call(['juju', 'collect-metrics', application])\n except subprocess.CalledProcessError as e:\n raise Exception(\"Unable to collect metrics: {}\".format(e))", "def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in 
self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]", "def _collect_all(self):", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def collect(self):\n pass", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def collectData(self):\n\n self.data.datahash = {} # dict of system data\n\n vmstat_dict = self._getvmstat()\n if vmstat_dict:\n self.data.datahash.update(vmstat_dict)\n\n uptime_dict = self._getuptime()\n if uptime_dict:\n self.data.datahash.update(uptime_dict)\n\n log.log( \"<system>system.collectData(): new system list created\", 7 )", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def set_metrics(self):", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()", "def collect():\n\n stats = {}\n for feed in Feed.objects:\n try:\n logger.info('Fetching from {0}...'.format(feed.ext_url))\n new_articles = fetch(feed)\n stats[feed.ext_url] = len(new_articles)\n\n except SAXException as e:\n if feed.errors is None:\n feed.errors = 0\n\n # Error with the feed, make a note.\n logger.info('Error fetching from {0}.'.format(feed.ext_url))\n feed.errors += 1\n feed.save()\n pretty_stats = json.dumps(stats, sort_keys=True, indent=4)\n notify('Corpora collection complete.', 'Total article count: {0}\\n\\nResults for this pass:\\n{1}'.format(len(Article.objects), pretty_stats))", "def compute_statistics(self):", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def collect_stats(self):\n\n df_avg, self.transport_df, self.customer_df, self.manager_df, self.station_df = self.get_stats_dataframes()\n\n columns = []\n if self.config.simulation_name:\n df_avg[\"Simulation Name\"] = self.config.simulation_name\n columns = [\"Simulation Name\"]\n columns += [\"Avg Waiting Time\", \"Avg Total Time\", \"Simulation Time\"]\n if self.config.max_time:\n df_avg[\"Max Time\"] = self.config.max_time\n columns += [\"Max Time\"]\n columns += [\"Simulation Finished\"]\n self.df_avg = df_avg[columns]", "def test_get_all_derived_metrics(self):\n pass", "def get_metrics(self):\n return None", "def collect_metrics(params, start, uuid=str(uuid4())):\n list_of_metrics = 
['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])", "def handleTelemetry(self):\n\t\tprint(\"*****************handleTelemetry\")\n\t\tself.cpuUtilPct = self.cpuUtilTask.getTelemetryValue() # Get CPU usage performance\n\t\tself.memUtilPct = self.memUtilTask.getTelemetryValue() # Get Memory usage performance\n\t\tsysData = SystemPerformanceData()\n\t\tsysData.setCpuUtilization(self.cpuUtilPct)\n\t\tsysData.setMemoryUtilization(self.memUtilPct)\n\t\tself.dataMessageListener.handleSystemPerformanceMessage(sysData)\n\t\tlogging.info('CPU utilization is %s percent, and memory utilization is %s percent.', str(self.cpuUtilPct), str(self.memUtilPct))\n\t\t# Log out the usage performance", "def list_metrics(self):\n pass", "def metrics_collector(self, metrics_collector):\n\n self._metrics_collector = metrics_collector", "def metrics(self, metrics):\n\n self._metrics = metrics", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def _fetch_gauge_metrics_and_clear(self):\n with self._gauge_rlock:\n gauge_metrics = self._gauge_metrics\n self._gauge_metrics = defaultdict(int)\n\n return gauge_metrics", "def read_callback(data=None):\n\n hits_by_domain = 
get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def collect():\n datadir = 'data'\n if 'OUTPUT_DATA_DIR' in os.environ:\n datadir = os.environ['OUTPUT_DATA_DIR']\n\n scraper_dir = os.path.join(os.getcwd(), 'scrapers')\n scrapers = get_scraper_list(scraper_dir)\n now = datetime.now()\n total_deals = []\n for scr_instance in scrapers:\n deals = scr_instance.get_deals()\n\n # Map a timestamp on each deal\n for item in deals:\n item.update({'timestamp': now.strftime('%Y-%m-%d')})\n\n print(\"\\n Collected {0} deals for {1} \\n\\n\".format(len(deals), scr))\n\n total_deals += deals\n\n filename = '{0}_resultset.json'.format(now.strftime('%Y%m%d_%H%I%S'))\n\n fh = open(os.path.join(datadir, filename), 'w+')\n fh.write(json.dumps(total_deals))\n fh.close()", "def collect(self, app):\n pass", "def collect_storage_metrics(sys):\n try:\n session = get_session()\n client = InfluxDBClient(host=INFLUXDB_HOSTNAME, port=INFLUXDB_PORT, database=INFLUXDB_DATABASE)\n\n sys_id = sys[\"id\"]\n sys_name = get_system_name(sys)\n\n json_body = list()\n\n # Get Drive statistics\n drive_stats_list = session.get((\"{}/{}/analysed-drive-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n drive_locations = get_drive_location(sys_id, session)\n if CMD.showDriveNames:\n for stats in drive_stats_list:\n location_send = drive_locations.get(stats[\"diskId\"])\n LOG.info((\"Tray{:02.0f}, Slot{:03.0f}\").format(location_send[0], location_send[1]))\n # Add Drive statistics to json body\n for stats in drive_stats_list:\n disk_location_info = drive_locations.get(stats[\"diskId\"])\n disk_item = dict(\n measurement = \"disks\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n sys_tray = (\"{:02.0f}\").format(disk_location_info[0]),\n sys_tray_slot = (\"{:03.0f}\").format(disk_location_info[1])\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in DRIVE_PARAMS\n )\n )\n if CMD.showDriveMetrics:\n LOG.info(\"Drive payload: %s\", disk_item)\n json_body.append(disk_item)\n\n # Get interface statistics\n interface_stats_list = session.get((\"{}/{}/analysed-interface-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showInterfaceNames:\n for stats in interface_stats_list:\n LOG.info(stats[\"interfaceId\"])\n # Add interface statistics to json body\n for stats in interface_stats_list:\n if_item = dict(\n measurement = \"interface\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n interface_id = stats[\"interfaceId\"],\n channel_type = stats[\"channelType\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in INTERFACE_PARAMS\n )\n )\n if CMD.showInterfaceMetrics:\n LOG.info(\"Interface payload: %s\", if_item)\n json_body.append(if_item)\n\n # Get System statistics\n system_stats_list = session.get((\"{}/{}/analysed-system-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n # Add System statistics to json body\n sys_item = dict(\n measurement = \"systems\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name\n ),\n fields = dict(\n (metric, system_stats_list.get(metric)) for 
metric in SYSTEM_PARAMS\n )\n )\n if CMD.showSystemMetrics:\n LOG.info(\"System payload: %s\", sys_item)\n json_body.append(sys_item)\n \n # Get Volume statistics\n volume_stats_list = session.get((\"{}/{}/analysed-volume-statistics\").format(\n PROXY_BASE_URL, sys_id)).json()\n if CMD.showVolumeNames:\n for stats in volume_stats_list:\n LOG.info(stats[\"volumeName\"])\n # Add Volume statistics to json body\n for stats in volume_stats_list:\n vol_item = dict(\n measurement = \"volumes\",\n tags = dict(\n sys_id = sys_id,\n sys_name = sys_name,\n vol_name = stats[\"volumeName\"]\n ),\n fields = dict(\n (metric, stats.get(metric)) for metric in VOLUME_PARAMS\n )\n )\n if CMD.showVolumeMetrics:\n LOG.info(\"Volume payload: %s\", vol_item)\n json_body.append(vol_item)\n\n if not CMD.doNotPost:\n client.write_points(json_body, database=INFLUXDB_DATABASE, time_precision=\"s\")\n\n except RuntimeError:\n LOG.error((\"Error when attempting to post statistics for {}/{}\").format(sys[\"name\"], sys[\"id\"]))", "def metrics(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'metrics')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. 
on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector", "def perform(self, context):\n super().perform(context)\n\n collections = context[CTX_KEY_COMMON_COLLECTD_JMX_COLLECTIONS_SET]\n for idx, entry in enumerate(collections):\n mbean_name = entry['name']\n app_name = context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX]\n\n if self.is_mbean_exist_in_hierarchy(\n context[CTX_KEY_COMMON_COLLECTD_JMX_MBEANS_HIERARCHY], mbean_name, app_name):\n entry[CTX_KEY_COMMON_COLLECTD_JMX_COLLECTIONS_ENTRY_VALIDATED] = True\n elif self.is_mbean_exist_in_hierarchy(context[\n CTX_KEY_COMMON_COLLECTD_JMX_MBEANS_HIERARCHY], mbean_name, 'common'):\n entry[CTX_KEY_COMMON_COLLECTD_JMX_COLLECTIONS_ENTRY_VALIDATED] = True\n else:\n entry[CTX_KEY_COMMON_COLLECTD_JMX_COLLECTIONS_ENTRY_VALIDATED] = False\n logger.warning(\"Collect [%s] does not exist!\", mbean_name)\n\n jmxType = context[CTX_KEY_COMMON_COLLECTD_JMX_TYPE]\n logger.debug(\"======================================================================\")\n logger.debug('[Transifig] Collectd-[%s] Collected mbean names validated', jmxType)\n logger.debug(\"======================================================================\")", "def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))", "def _collect_stats(self, trainer: pl.Trainer, pl_module: AnomalyModule) -> None:\n predictions = Trainer(accelerator=trainer.accelerator, devices=trainer.num_devices).predict(\n model=self._create_inference_model(pl_module), dataloaders=trainer.datamodule.train_dataloader()\n )\n pl_module.normalization_metrics.reset()\n for batch in predictions:\n if \"pred_scores\" in batch.keys():\n pl_module.normalization_metrics.update(anomaly_scores=batch[\"pred_scores\"])\n if \"anomaly_maps\" in batch.keys():\n pl_module.normalization_metrics.update(anomaly_maps=batch[\"anomaly_maps\"])\n pl_module.normalization_metrics.compute()", "def metrics(self):\n return self.__metrics", "def calculate_batch_metrics(self):\n pass", "def collect_statistics(self, stat_col, data_streams):\n self.module.collect_statistics(stat_col, data_streams)", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def send_metrics(self):\n metrics = self.get_metrics()\n if not metrics:\n return\n\n for mkey, metric in metrics.items():\n for mname, mval in metric.items():\n try:\n self.agent.record_custom_metric(self.convert_metric_name(mkey, mname), mval, None)\n except Exception as e:\n print_(e)", "def loadFeeds(self):\n\n metrics = 
self.config['metrics']\n for metric in metrics:\n metricConf = self.config['metrics'][metric]\n metricConf['name'] = metric\n source = metricConf['source']['driver']\n if 'metrics' not in self.sources[source['name']]:\n self.sources[source['name']]['metrics'] = []\n\n self.sources[source['name']]['metrics'].append(metricConf)", "def _get_stats(self):\n self.stats = set()\n self._bstats = set()\n self._h_bstats = set()\n self._tstats = set()\n self._ftstats = set()\n for cl in self.data_classes:\n for stat in cl._bstats:\n self.stats.add(stat)\n self._bstats.add(stat)\n for stat in cl._hbstats:\n self.stats.add(stat)\n self._h_bstats.add(stat)\n for stat in cl._tstats:\n self._tstats.add(stat)\n self.stats.add(stat)\n try:\n trips = cl.triples\n f_stats = cl.read_tfstats(trips,eq=False,lande=False)\n for trip in f_stats:\n for stat in f_stats[trip]:\n self._ftstats.add(stat)\n self.stats.add(stat)\n except:\n AttributeError", "def stats(self):\n pass", "def collect_browser_metrics(self, task):\n logging.debug(\"Collecting user timing metrics\")\n user_timing = self.run_js_file('user_timing.js')\n if user_timing is not None:\n path = os.path.join(task['dir'], task['prefix'] + '_timed_events.json.gz')\n with gzip.open(path, 'wb', 7) as outfile:\n outfile.write(json.dumps(user_timing))\n logging.debug(\"Collecting page-level metrics\")\n page_data = self.run_js_file('page_data.js')\n if page_data is not None:\n task['page_data'].update(page_data)\n if 'customMetrics' in self.job:\n self.driver.set_script_timeout(30)\n custom_metrics = {}\n for name in self.job['customMetrics']:\n logging.debug(\"Collecting custom metric %s\", name)\n script = 'var wptCustomMetric = function() {' +\\\n self.job['customMetrics'][name] +\\\n '};try{return wptCustomMetric();}catch(e){};'\n try:\n custom_metrics[name] = self.driver.execute_script(script)\n if custom_metrics[name] is not None:\n logging.debug(custom_metrics[name])\n except Exception:\n pass\n path = os.path.join(task['dir'], task['prefix'] + '_metrics.json.gz')\n with gzip.open(path, 'wb', 7) as outfile:\n outfile.write(json.dumps(custom_metrics))", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n 
data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def metrics(self, project, callback=None):\n\n self.client.select(self.selected_db)\n\n metrics = {}\n\n for metric_name in (yield gen.Task(self.get_metrics_list, project)):\n if metric_name not in metrics.keys():\n metrics[metric_name] = {}\n\n for filter_name in (yield gen.Task(self.get_filters, project, metric_name)):\n metrics[metric_name][filter_name] = (yield gen.Task(self.get_filter_values,\n project, metric_name, filter_name))\n\n if callback:\n callback(metrics)", "def _compute_experiment_statistics(self):\n pass", "def collect():\n\n command = \"cat /proc/meminfo |grep MemTotal|awk -F' ' '{print $2}'\"\n memTotal_f = round(float(os.popen(command).read())/1024/1000,0)\n memTotal = int(memTotal_f)\n cmd = 'df -h |grep \"/dev/s\"'\n metric_disk = os.popen(cmd).readlines()\n hardNum=[]\n for i in metric_disk:\n hard_space = float((i.strip().split()[1])[:-1])\n hardNum.append(hard_space)\n\n disk_info = sum(hardNum)\n disk_use = {}\n metric_disks=os.popen('df -x tmpfs -x devtmpfs | grep -Eo \" /\\S*$\" ').readlines()\n for disk in metric_disks:\n cmd = 'df|grep -E \"%s$\"' % disk.strip()\n disks = os.popen(cmd).readlines()[0]\n disk_list = disks.split()\n disk_use[disk_list[5]]=disk_list[4]\n hard = {\n \"disk_used\" : disk_use,\n \"disk_total\":disk_info,\n \"mem_total\":memTotal\n }\n\n return hard", "def get_all_metrics():\n return get_overlap_metrics() + get_distance_metrics() + get_distance_metrics()", "def _fetch_time_metrics_and_clear(self):\n with self._time_rlock:\n time_metrics = self._time_metrics\n self._time_metrics = defaultdict(LatencyTracker)\n\n return time_metrics", "def collect(self, day: datetime) -> Dict:\n LOG.info(f'Collecting stats for {self.name} on {day.isoformat()}')\n collected = self._collect(day)\n LOG.debug(f'Collection for {self.name} complete')\n return collected", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def _start_collect_stats(sc):\n\n arg_dict = {}\n ev = sc.new_event(id=lb_const.EVENT_COLLECT_STATS_V2, data=arg_dict)\n sc.post_event(ev)", "def calculate_dataset_metrics(self):\n pass", "def collect_metrics() -> Tuple[Dict[str, Dict[str, Any]], Dict[str, List[str]]]:\n metric_docs: Dict[str, Dict[str, Any]] = {}\n metrics_by_integration: DefaultDict[str, List[str]] = defaultdict(list)\n # Reverse to keep backwards-compatible behavior with old script that kept\n # the last metric seen.\n for metric_yaml_file in sorted(INTEGRATIONS_PATH.glob(\"*/metrics.yaml\")):\n if \"Example\" in str(metric_yaml_file):\n continue\n\n for metric_name, metric in (yaml.safe_load(metric_yaml_file.read_text(encoding=\"utf-8\")) or {}).items():\n 
metrics_by_integration[metric_yaml_file.parent.name].append(metric_name)\n\n if metric_name in metric_docs:\n # print(f\"WARNING metric {metric_name} is duplicated, info will be taken from first one processed only\")\n continue\n\n desc = \"\"\n if \"description\" in metric:\n desc = metric[\"description\"]\n del metric[\"description\"]\n metric_docs[metric_name] = {\"yaml\": metric, \"markdown\": desc}\n return metric_docs, dict(metrics_by_integration)", "def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def stats(self):", "def metrics(env):\n envs = environments()\n check_env(env, envs)\n\n metrics = get_or_abort(puppetdb._query, 'mbean')\n return render_template('metrics.html',\n metrics=sorted(metrics.keys()),\n envs=envs,\n current_env=env)", "def prometheus_metrics(request):\n if not settings.DEBUG:\n return HttpResponseNotFound()\n\n # DEPRECATED: prometheus_multiproc_dir has been replaced by PROMETHEUS_MULTIPROC_DIR\n if \"PROMETHEUS_MULTIPROC_DIR\" in os.environ or \"prometheus_multiproc_dir\" in os.environ:\n registry = prometheus_client.CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n else:\n registry = prometheus_client.REGISTRY\n metrics_page = prometheus_client.generate_latest(registry)\n return HttpResponse(\n metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST\n )", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n 
zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def read_metrics(self):\n raise NotImplementedError()", "def _get_metrics(self, *args, **kwargs):\n logger.warning(\"Could not get metric. No function registered.\")", "def fetch_metrics(self):\n\n self.explain_all_indices()", "def collect(self, collector):\n return collector(self)", "def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def get_metrics(self) -> dict:\n return self.metric_dict", "def _process(self):\n export_collect_medias(self.kwargs[\"collect\"])", "def __init__(self, metrics, gt, pred):\n self.dict_metrics = self.compute_metrics(metrics, gt, pred)", "def metrics(self, **kwargs):\n return suggest.metrics(self._host, self._session, **kwargs)", "def initialize_statistics_collection(self):\n # Create statistics collector for testing.\n self.testing_stat_col = StatisticsCollector()\n self.add_statistics(self.testing_stat_col)\n self.problem.add_statistics(self.testing_stat_col)\n self.model.add_statistics(self.testing_stat_col)\n # Create the csv file to store the testing statistics.\n self.testing_batch_stats_file = self.testing_stat_col.initialize_csv_file(self.log_dir, 'testing_statistics.csv')\n\n # Create statistics aggregator for testing.\n self.testing_stat_agg = StatisticsAggregator()\n self.add_aggregators(self.testing_stat_agg)\n self.problem.add_aggregators(self.testing_stat_agg)\n self.model.add_aggregators(self.testing_stat_agg)\n # Create the csv file to store the testing statistic aggregations.\n # Will contain a single row with aggregated statistics.\n self.testing_set_stats_file = self.testing_stat_agg.initialize_csv_file(self.log_dir, 'testing_set_agg_statistics.csv')", "def derive_newrelic_stats(self):\n self.logger.debug(\"Collecting stats for newrelic\")\n self.derive_newrelic_volume()\n self.derive_newrelic_throughput()\n self.derive_newrelic_innodb()\n self.derive_newrelic_qcache()\n self.derive_newrelic_slaves()", "def getAllMetrics(self):\n result = self.getReportMetrics()\n result.update(self.getOptimizationMetrics())\n return result", "def collect(self):\n if not self.collector:\n raise AssertionError(\"Can't call collect when you've registered your own collect_fn!\")\n\n queries = []\n try:\n while 
True:\n queries.append(self.collector.get(block=False))\n except queue.Empty:\n pass\n\n return queries", "def get_metrics(self):\n self.logger.debug(\"Fetching metrics.\")\n return self._api_query(\"metrics\")['metrics']", "def get_all_metrics(self):\n metrics = {}\n for item in self.list_metrics():\n metric_name = item[2]\n metric = self.get_metric(\n item,\n existing_dict=metrics.get(metric_name, None))\n metrics[metric_name] = metric\n return metrics", "def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)", "def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)", "def get_results_from_aggregation_sources(self, context):" ]
[ "0.6958763", "0.69119155", "0.68189895", "0.66842306", "0.66356134", "0.6568736", "0.6555862", "0.6482938", "0.64655095", "0.64260054", "0.641891", "0.63836133", "0.63763535", "0.62167543", "0.62135196", "0.62097865", "0.6177793", "0.61742324", "0.61667204", "0.61239713", "0.6101292", "0.6057514", "0.6046839", "0.6036569", "0.60301644", "0.6017277", "0.60003597", "0.5986058", "0.5976332", "0.5970084", "0.5947343", "0.59442264", "0.5895751", "0.5872321", "0.5871243", "0.58445513", "0.5825949", "0.5819051", "0.58168805", "0.58114314", "0.5804171", "0.58009595", "0.57612973", "0.57530344", "0.5740108", "0.5737231", "0.5727704", "0.57268536", "0.5694863", "0.56923246", "0.56799686", "0.567829", "0.5671171", "0.5668734", "0.5662488", "0.5662297", "0.5658341", "0.5658341", "0.56575847", "0.56540436", "0.5648167", "0.5647991", "0.56472963", "0.56414247", "0.56414026", "0.56384796", "0.56262594", "0.56230587", "0.5618333", "0.56159335", "0.56143165", "0.5607523", "0.55993605", "0.5598379", "0.5588213", "0.5580269", "0.5566351", "0.55607384", "0.5560451", "0.55581796", "0.5552923", "0.5552923", "0.5552663", "0.5542313", "0.55319995", "0.5527367", "0.5525694", "0.5525131", "0.5521124", "0.5516284", "0.5504242", "0.5504024", "0.5502025", "0.54834485", "0.5465727", "0.54652977", "0.5458487", "0.5454299", "0.5444813", "0.54220605" ]
0.7913655
0
Return the process of the agent.
def current_process(self):
        return self._current_process
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )", "def get_process(self, pid):\n return self.processes.get(pid, None)", "def get_my_process():\n return get_process_object(os.getpid())", "def get_process():\n data = _get_process_detail_expanded_data()[\"process\"]\n return data", "def getProcessManager(self): \n \n return self.procmgr", "def mmo_what_process_am_i(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"process\"];", "def proc(self):\n return self._proc", "def get_process(self) -> ApplyResult:\n return self._process", "def pid(self):\n return self._get_process_id()", "def pid(self):\n return self._process.pid", "def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()", "def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')", "def process_id(self):\n return self._process_id", "def process_id(self):\n return self._process_id", "def process(self):\n # type: () -> Optional[multiprocessing.Process]\n try:\n return self._process # type: ignore # pylint: disable=no-member\n except:\n return None", "def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def get_process(ngrok_path, config_path=None, auth_token=None, region=None):\n if ngrok_path in _current_processes:\n # Ensure the process is still running and hasn't been killed externally\n if _current_processes[ngrok_path].proc.poll() is None:\n return _current_processes[ngrok_path]\n else:\n _current_processes.pop(ngrok_path, None)\n\n return _start_process(ngrok_path, config_path, auth_token, region)", "def agent(self):\n return self.__agent", "def GetChromeProcess(self):\n procs = self.ListProcesses()\n session_manager_pid = self._GetSessionManagerPid(procs)\n if not session_manager_pid:\n return None\n\n # Find the chrome process that is the child of the session_manager.\n for pid, process, ppid, _ in procs:\n if ppid != session_manager_pid:\n continue\n for regex in _CHROME_PROCESS_REGEX:\n path_match = re.match(regex, process)\n if path_match is not None:\n return {'pid': pid, 'path': path_match.group(), 'args': process}\n return None", "def get_cognitive_process(self):\n if not self.has_cognitive_process():\n raise IllegalState()\n else:\n return Grade(self._get_grade_map(self._my_map['cognitiveProcessId'])),", "def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None", "def get_processor(self):\n return self._processor", 
"def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def spawn(self):\r\n self.before_spawn()\r\n pid = Subprocess.spawn(self)\r\n if pid is None:\r\n #Remove object reference to decrement the reference count on error\r\n self.fcgi_sock = None\r\n return pid", "def get_process_name(self):\n\n return self._args.t", "def new_process() -> Process:\n return multiprocessing.Process()", "def pid(self):\n return self.__pid", "def pid(self):\n\t\treturn self.__pid", "def get_worker(self):\n return self.worker", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def get_a_local_process(self, proc_name=''):\n for p in self.procs.itervalues():\n\n if p.name == proc_name:\n return p\n\n if p.process_type == AGENT_PROCESS_TYPE and p.resource_type == proc_name:\n return p\n\n return None", "def get_PID(self):\n return self.PID", "def process():\n reader = owslib.wps.WPSDescribeProcessReader()\n root = reader.readFromString(open(resource_file(\"process_description.xml\")).read())\n xml = root.findall(\"ProcessDescription\")[0]\n return owslib.wps.Process(xml)", "def get_goal(self):\n self._pid_lock.acquire() # Acquire Lock\n rtn = self._goal\n self._pid_lock.release() # Release Lock\n\n return rtn", "def pid(self):\n return self._query_status()['pid']", "def get_process_id():\n process_id = os.environ[\"WS_PROCESS_ID\"]\n return process_id", "def pid(self):", "def _spawn_agent_process(self, process_id, name, module, cls, config, proc_attr):\n process_instance = self._create_process_instance(process_id, name, module, cls, config, proc_attr)\n if not isinstance(process_instance, ResourceAgent) and not isinstance(process_instance, SimpleResourceAgent):\n raise ContainerConfigError(\"Agent process must extend ResourceAgent\")\n listeners = []\n\n # Set the resource ID if we get it through the config\n resource_id = get_safe(process_instance.CFG, \"agent.resource_id\")\n if resource_id:\n process_instance.resource_id = resource_id\n\n alistener = self._create_listening_endpoint(node=self.container.node,\n from_name=resource_id,\n process=process_instance)\n\n listeners.append(alistener)\n\n rsvc = self._create_listening_endpoint(node=self.container.node,\n from_name=process_instance.id,\n process=process_instance)\n\n listeners.append(rsvc)\n\n # cleanup method to delete process/agent queue (@TODO: leaks a bit here - should use XOs)\n def agent_cleanup(x):\n self._cleanup_method(process_instance.id, rsvc)\n if resource_id:\n self._cleanup_method(resource_id, alistener)\n\n proc = self.proc_sup.spawn(name=process_instance.id,\n service=process_instance,\n listeners=listeners,\n proc_name=process_instance._proc_name,\n cleanup_method=agent_cleanup)\n proc.proc._glname = \"ION Proc %s\" % process_instance._proc_name\n self.proc_sup.ensure_ready(proc, \"_spawn_agent_process for %s\" % process_instance.id)\n\n # map gproc to process_instance\n self._spawned_proc_to_process[proc.proc] = process_instance\n\n # set service's reference to process\n process_instance._process = proc\n\n # Now call the on_init of the agent.\n 
self._process_init(process_instance)\n\n if not process_instance.resource_id:\n log.warn(\"New agent pid=%s has no resource_id set\" % process_id)\n\n self._process_start(process_instance)\n\n try:\n proc.start_listeners()\n except IonProcessError:\n self._process_quit(process_instance)\n self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)\n raise\n\n if not process_instance.resource_id:\n log.warn(\"Agent process id=%s does not define resource_id!!\" % process_instance.id)\n\n return process_instance", "def GetChromePid(self):\n result = self.GetChromeProcess()\n if result and 'pid' in result:\n return result['pid']\n return None", "def Spawn(proc):\n proc.start()\n return proc", "def process_control(self):\n return self._process_control", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def get_agent(self):\n servers = self.get_agents()\n assert servers, \"starter: have no instances!\"\n return servers[0]", "def process_target(self):\n return self.target", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def get_command(pid):", "def get_process_by_process_id(self, process_id):\n try:\n process = Process.objects.get(pk=process_id)\n except Process.DoesNotExist:\n process = None\n\n return process", "def agent_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"agent_id\")", "def select_process(self):\n result = -1\n for idx in self.priority:\n if self.processes[idx].working_time < self.processes[idx].final_execution_time:\n result = idx\n break\n return result", "def agent(self) -> Entity:\n return self.__agent", "def _post_processes(self):\n return self.agents[0].post_process(self.agents)", "def cli_get_process_title():\n raise NotImplementedError()", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def ppid(self):", "def get_pid_processor(pid):\n if pid != 0:\n pid_str = pid\n else:\n pid_str = 'self'\n\n # read procfs /proc/PID/stat file to get info about processor\n # that PID was scheduled on last time\n try:\n with open(\"/proc/{}/stat\".format(pid_str)) as stat_file:\n proc_stat = stat_file.readline().strip().split(' ')\n return int(proc_stat[39])\n except EnvironmentError:\n return -1", "def get(name, config):\n\n process = Process()\n\n # Build workflow\n with st.spinner(\"Building workflow....\"):\n process.build(name, config)\n\n return process", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def worker(self):\n return 
self._worker", "def get_process_object(pid, die=True):\n try:\n return psutil.Process(pid)\n except psutil.NoSuchProcess as e:\n if die:\n raise e\n else:\n return None", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def _get_pid(self):\n ps_txt = six.ensure_str(self.controller.run(\n args=[\"ps\", \"ww\", \"-u\"+str(os.getuid())]\n ).stdout.getvalue()).strip()\n lines = ps_txt.split(\"\\n\")[1:]\n\n for line in lines:\n if line.find(\"ceph-{0} -i {1}\".format(self.daemon_type, self.daemon_id)) != -1:\n log.info(\"Found ps line for daemon: {0}\".format(line))\n return int(line.split()[0])\n log.info(\"No match for {0} {1}: {2}\".format(\n self.daemon_type, self.daemon_id, ps_txt\n ))\n return None", "def GetPublishedProcesses():\r\n pass", "def _monitorProcess( self ):\n self.processContainer.lock.acquire()\n try:\n try:\n if self.processContainer.agent is None or self.processContainer.agent.poll() is not None:\n self.processContainer.agent = self._launchAgentProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )\n finally:\n self.processContainer.lock.release()", "def __read_pidfile(self):\n try:\n pf = file(self.pidfile, 'r')\n contents = pf.read().strip().split()\n pf.close()\n except IOError:\n return None\n\n pid = int(contents[0])\n try:\n os.kill(pid, 0)\n except OSError, e:\n # ESRCH indicates the process is not running, in which case we ignore the pidfile.\n if e.errno == errno.ESRCH:\n return None\n # EPERM indicates the current user does not have permission to signal the process.. so it exists\n # but may not be the agent process. We will just try our /proc/pid/commandline trick below if we can.\n elif e.errno != errno.EPERM:\n raise e\n\n # If we got here, the process is running, and we have to see if we can determine if it is really the\n # original agent process. For Linux systems with /proc, we see if the commandlines match up.\n # For all other Posix systems, (Mac OS X, etc) we bail for now.\n if not self.__can_read_command_line(pid):\n return pid\n\n # Handle the case that we have an old pid file that didn't have the commandline right into it.\n if len(contents) == 1:\n return pid\n\n command_line = self.__read_command_line(pid)\n if contents[1] == command_line:\n return pid\n else:\n return None", "def procinfo(self):\n\n info = {}\n info[\"pid\"] = self.pid\n info[\"exe\"] = self.exe\n info[\"procname\"] = self.procname\n\n return info", "def get_cognitive_process_metadata(self):\n return Metadata(**settings.METADATA['cognitive_process_id'])", "def get_process_info_by_pid(self, pid):\n # TODO: discuss if self.logger needs to happen here? I think not? 
-BY\n\n for process in self.data_storage.running_data:\n if self.data_storage.running_data[process]['pid'] == pid:\n return self.data_storage.running_data[process]", "def start(self):\n if hasattr(self, 'process'):\n err = \"video '{}' Frames Extraction has already \" \\\n \"started.\".format(self.video_file)\n print err\n raise Exception(err)\n\n process_number = subprocess.Popen(self.start_frames, stdout=subprocess.PIPE)\n process_number.wait()\n return process_number", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. 
If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def normalization_process(self):\n return NormalizationProcess(self._get_attr('normalization_process_id'))", "def get_state(self):\n\t\treturn Job(SDK.PrlVm_GetState(self.handle)[0])", "def num_processes(self):\n return 1", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def test_get_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known application\n self.assertEqual('first process', rpc._get_process('proc_1'))\n # test with unknown application\n with self.assertRaises(RPCError) as exc:\n rpc._get_process('proc')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual('BAD_NAME: process proc unknown in Supvisors',\n exc.exception.text)", "def getNode(self):\r\n try:\r\n output,error = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()\r\n if self.jobId in output:\r\n return output.split(\"\\t\")[7]\r\n if len(error) > 0:\r\n logging.error(error)\r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def agent_session(self):\n return self._agent_session", "def get_cognitive_process_id(self):\n if not self.has_cognitive_process():\n raise IllegalState()\n else:\n return Id(self._my_map['cognitiveProcessId'])", "def num_processes():\n return 1", "def launch(self):\n return self._launch", "def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" 
% fd, path))\n\n return", "def quantification_process(self):\n return QuantificationProcess(\n self._get_attr('quantification_process_id'))", "def agent_class(self):\r\n return self._agent_class", "def getAgentID(self):\n\t\treturn self.agentID", "def get_sequencing_process(process):\n\n # Each entry in input_output_maps is an input/output specification with a single\n # input and any number of outputs. This gets the first input.\n first_io = process.input_output_maps[0]\n first_in_artifact = first_io[0]['uri']\n\n processes = process.lims.get_processes(inputartifactlimsid=first_in_artifact.id)\n seq_processes = [proc for proc in processes if proc.type_name in [p[1] for p in SEQ_PROCESSES]]\n # Use the last sequencing process. In case of crashed runs, this will be the right one.\n try:\n return seq_processes[-1]\n except IndexError:\n return None", "def _pid(self, name):\n return self.pid_lookup[name]", "def pid(self) -> str:\n if not self.pid_path.exists():\n return None\n try:\n with open(self.pid_path, 'r') as f:\n text = f.read()\n except Exception as e:\n warn(e)\n text = None\n return text.rstrip('\\n') if text is not None else text", "def get_process_state(self, path, params):\n reply = self._local_collector.get_process_state()\n self._augment_state_reply(reply, path)\n return reply", "def P_work(self):\n return self._P", "def dynamic_pid(self):\n pass", "def system(self):\r\n return self.runtime", "def processlocal(self) :\n\t\ttry :\n\t\t\treturn self._processlocal\n\t\texcept Exception as e:\n\t\t\traise e", "async def get_process_id() -> int:\n print(\"Await sleep\")\n logger = getLogger(__name__)\n logger.debug(\"Get process id\")\n # Wait for starting subprocess\n # otherwise, time.sleep() will block starting subprocess.\n current_process = psutil.Process(os.getpid())\n while len(current_process.children()) < 2:\n print(len(current_process.children()))\n await asyncio.sleep(0.01)\n logger.debug(\"Start sleep\")\n time.sleep(SECOND_SLEEP_FOR_TEST_LONG)\n print(\"Kill all processes in this window.\")\n return 0", "def _get_target_function():\n return quartz_mouse_process" ]
[ "0.69227177", "0.68076235", "0.66409147", "0.65917194", "0.6577045", "0.65301675", "0.6481512", "0.6463723", "0.6425003", "0.639765", "0.6345163", "0.6329305", "0.63107604", "0.63107604", "0.6309831", "0.6220328", "0.6186098", "0.61813504", "0.6084275", "0.6083225", "0.6051368", "0.6008365", "0.59980464", "0.59642017", "0.59629494", "0.592352", "0.58746", "0.58428466", "0.5806936", "0.58057505", "0.57619184", "0.5737387", "0.57354283", "0.5725666", "0.57077557", "0.57077557", "0.57077557", "0.5663746", "0.5657366", "0.5656398", "0.56497824", "0.56378555", "0.5617041", "0.5608434", "0.558792", "0.5581048", "0.5559917", "0.5542854", "0.5533088", "0.5514432", "0.5501145", "0.54929477", "0.5480014", "0.54711235", "0.54685354", "0.5466363", "0.5463914", "0.5441616", "0.54384595", "0.54242367", "0.5406217", "0.5390044", "0.5388014", "0.53867805", "0.53804904", "0.53745687", "0.5359537", "0.53484124", "0.5338502", "0.5329955", "0.53238773", "0.5317176", "0.53139544", "0.5304628", "0.5297982", "0.52963847", "0.5289502", "0.52843356", "0.52643275", "0.52641773", "0.5260529", "0.5247792", "0.5246464", "0.52441597", "0.5232953", "0.52316004", "0.5214579", "0.5211429", "0.52104235", "0.52102983", "0.5209226", "0.520094", "0.5197237", "0.51947975", "0.5194069", "0.5188495", "0.5188429", "0.51617044", "0.5158556", "0.5139751" ]
0.69763553
0
Given a string, match the processes on the name
def get_matches_commandline(self, match_pattern):
        matches = []
        for _process in self.processes:
            if re.search(match_pattern, _process["cmd"]):
                matches.append(_process["pid"])
        return matches
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def find(name, exact=False):\n processes = run(\"ps aux | grep {0}\".format(name))\n res = []\n for line in processes.split(\"\\n\"):\n if not line.strip():\n continue\n line = RE_SPACES.split(line, 10)\n # We skip lines that are not like we expect them (sometimes error\n # message creep up the output)\n if len(line) < 11:\n continue\n user, pid, cpu, mem, vsz, rss, tty, stat, start, time, command = line\n if (exact and command == name) \\\n or (not exact and command.startswith(name)):\n res.append(pid)\n return res", "def kill_process_by_name(re_pattern):\n\n user_name = os.getlogin()\n parent_pid = os.getppid()\n current_pid = os.getpid()\n\n stdin = subprocess.check_output([\"ps\", \"-u\", user_name])\n\n processes = []\n\n processes = [(int(re.match(\" *[0-9]+\", line).group()), line.split(' ')[-1]) for line in stdin.split('\\n')[1:-1]]\n\n for process in processes:\n\n if re.match(re_pattern, process[1]) and process[0] != current_pid:\n# print \"KILLING PID: \", process\n os.kill(process[0], signal.SIGKILL)", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def get_similar_processes():\n myprocess = get_my_process()\n result = []\n for item in psutil.process_iter():\n try:\n if item.cmdline() == myprocess.cmdline():\n result.append(item)\n except psutil.NoSuchProcess:\n pass\n return result", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def findProcessIdByName(processName):\n listOfProcessObjects = []\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"create_time\"])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo[\"name\"].lower():\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return listOfProcessObjects", "def check_processes(self, name: Optional[str] = None) -> str:\n\n for process in self.processes:\n if not process.is_running():\n self.processes.remove(process)\n continue\n\n cmdline = \" \".join(process.cmdline())\n port = re.findall(r\"--port=(\\d+)\", cmdline)\n port = port[0] if port else \"\"\n\n if re.findall(r\"-m\\s+.*streamlit_run|streamlit\", cmdline):\n return f\"http://localhost:{port}/{name}\"\n\n return \"\"", "def processCheckPidAndName(uPid, sName):\n fRc = processExists(uPid);\n if fRc is True:\n try:\n from win32com.client import GetObject; # pylint: disable=F0401\n oWmi = GetObject('winmgmts:');\n aoProcesses = oWmi.InstancesOf('Win32_Process');\n for oProcess in aoProcesses:\n if long(oProcess.Properties_(\"ProcessId\").Value) == uPid:\n sCurName = oProcess.Properties_(\"Name\").Value;\n reporter.log2('uPid=%s sName=%s sCurName=%s' % (uPid, sName, sCurName));\n sName = sName.lower();\n sCurName = sCurName.lower();\n if os.path.basename(sName) == sName:\n sCurName = os.path.basename(sCurName);\n\n if sCurName == sName \\\n or sCurName + '.exe' == sName \\\n or sCurName == sName + '.exe':\n fRc = True;\n break;\n except:\n reporter.logXcpt('uPid=%s sName=%s' % (uPid, sName));\n return fRc;", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def match(self, name, args, useName=True, useMd5Digest=True):\n if self._config.name is None:\n return False\n\n # SNMP agents return a 'flexible' number of characters,\n # so exact matching isn't always reliable.\n processName = ('%s %s' % (name, args or '')).strip()\n\n # Make the comparison\n result = self._compiled_regex.search(processName) is not None\n\n # We can a match, but it might not be for us\n if result and useMd5Digest:\n # Compare this arg list against the digest of this proc\n digest = md5(args).hexdigest()\n if self.digest and digest != self.digest:\n result = False\n\n if result and useName:\n cleanNameOnly = globalPrepId(name)\n nameMatch = self._compiled_name_regex.search(cleanNameOnly)\n if not nameMatch or nameMatch.group(1) not in ('', '_'):\n log.debug(\"Discarding match based on name mismatch: %s %s\", \n cleanNameOnly, self._name_only)\n 
result = False\n\n return result", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def exe_match(expected_name):\n # expected_name = expected_name.encode('ascii')\n def f(win):\n n = conv(win.process_name)\n return n == expected_name\n return f", "def _complete_processes(self, text):\r\n processes = []\r\n for info in self._get_complete_info():\r\n if ':' in text or info['name'] != info['group']:\r\n processes.append('%s:%s' % (info['group'], info['name']))\r\n if '%s:*' % info['group'] not in processes:\r\n processes.append('%s:*' % info['group'])\r\n else:\r\n processes.append(info['name'])\r\n return [ p + ' ' for p in processes if p.startswith(text) ]", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def find_program(name):\r\n return name", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def check_ambari_server_process_up(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output)", "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def check_process_for_pid(pid, process_name):\n pid = int(pid)\n proc = psutil.Process(pid)\n return proc.name() == process_name", "def get_pid_from_name(process_name:str) -> int:\r\n\tfor process in psutil.process_iter():\r\n\t\tif process_name in process.name():\r\n\t\t\treturn process.pid\r\n\traise ProcessLookupError(\"process '\" + process_name + \"' not found.\")", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def matches(self, pid):\n if self._command_wildcards or self._command_regexs:\n # Matchers requiring comm file\n path = P.join(PROC_DIR, str(pid), 'comm')\n try:\n with open(path) as f:\n comm = f.read().rstrip()\n for pattern in self._command_wildcards:\n if fnmatch(comm, pattern):\n return True\n\n for re_obj in self._command_regexs:\n if re_obj.match(comm):\n return True\n except FileNotFoundError:\n # process may have exited before file could be read\n return False\n\n return False", "def kill_process_by_name(name, sig=signal.SIGTERM.value, match_predicate=None):\n\n pids = get_pids(name, match_predicate=match_predicate)\n for pid in pids:\n kill(pid, sig)", "def match(self, string):\n matched = False\n cmd 
= None\n\n if string in self.commands.keys():\n matched = True\n cmd = string\n\n else:\n for command in self.commands.keys():\n if \"regex\" in self.commands[command].keys() \\\n and re.match(self.commands[command][\"regex\"], string):\n matched = True\n cmd = command\n break\n \n if cmd and len(cmd) > 0:\n self._last_matched_command = cmd\n else:\n self._last_matched_command = None\n\n return matched", "def set_process_name_and_cpu_priority(name):\n try:\n os.nice(19) # smooth cpu priority\n libc = cdll.LoadLibrary(\"libc.so.6\") # set process name\n buff = create_string_buffer(len(name.lower().strip()) + 1)\n buff.value = bytes(name.lower().strip().encode(\"utf-8\"))\n libc.prctl(15, byref(buff), 0, 0, 0)\n except Exception:\n return False # this may fail on windows and its normal, so be silent.\n else:\n log.debug(\"Process Name set to: {0}.\".format(name))\n return True", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def __init__(self, name, process_name, regexes):\n\n self.name = name\n self.process_name = process_name\n self.searches = [re.compile(regex).search for regex in regexes]\n self.count = 0", "def test_pick_a_process_to_run(self):\n workflow = self.get_workflow(\n \"\"\"file://C <- file://B\n echo C > C\n echo B creates C\n\nfile://B <- file://A\n echo B > B\n echo A creates B\n \"\"\")\n p = workflow.pick_a_process_to_run()\n assert p.id.find(\"_5\") >= 0, p.id", "def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n 
lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def killProcessByName(name, user = os.getpid(), sig = None):\n\n pids = findPIDs(name = name, user = user)\n\n if len(pids) == 0:\n #We have no processes to kill of this type\n return pids\n\n command = ['kill']\n if sig:\n command.append('-%i' % sig)\n for pid in pids:\n command.append(pid)\n\n subprocess.Popen(command, stdout=subprocess.PIPE).communicate()[0]\n \n\n return pids", "def killall(name, params=None):\n\n if platform.system() == \"Windows\":\n name += \".exe\"\n\n for ps in psutil.process_iter():\n cmdline = \"\"\n try:\n if ps.name() != name:\n continue\n\n if params:\n cmdline = ps.cmdline()\n except psutil.AccessDenied:\n continue\n\n ps_found = True\n\n if params: # If you want to compare command line\n check_list = []\n\n # Data converting\n if params is list:\n check_list = params\n elif params is str:\n check_list = str.split(\",\")\n else:\n check_list.append(str(params))\n\n # Compare command line's parameters\n for item in check_list:\n ps_found = False\n\n for param in cmdline:\n if param.find(item) != -1:\n ps_found = True\n break\n\n if ps_found is False: # Process is not found.\n break\n\n if ps_found:\n try:\n ps.kill()\n except Exception:\n pass", "def get_pids_filtered_by_regex(regex_list, excludes=None):\n excludes = excludes or []\n res = []\n for process in psutil.process_iter():\n try:\n cmdline = process.cmdline()\n except psutil.NoSuchProcess:\n cmdline = None\n except psutil.AccessDenied:\n cmdline = None\n if cmdline:\n name = \" \".join(cmdline)\n for r in regex_list:\n if name.strip() != \"\" and re.match(r, name):\n res.append(process.pid)\n return res", "async def edit_process(self, ctx, old_name, new_name):\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")", "def procExists(self, procname):\n\n proclist = self.getList( 'proclist' ) # safely get copy of process list\n\n count = 0 # count number of occurrences of 'procname'\n for i in proclist:\n command = string.split(i.comm, '/')[-1]\n if command == procname or i.procname == procname:\n count = count + 1\n\n return count", "def getPidByName(process_name):\n \n pid = None\n count = 0\n try:\n hProcessSnap = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)\n pe32 = PROCESSENTRY32()\n pe32.dwSize = sizeof(PROCESSENTRY32)\n ret = kernel32.Process32First(hProcessSnap , byref(pe32))\n while ret:\n if 
pe32.szExeFile == LPSTR(process_name).value:\n pid = pe32.th32ProcessID\n count += 1\n ret = kernel32.Process32Next(hProcessSnap, byref(pe32))\n kernel32.CloseHandle (hProcessSnap)\n \n except Exception, e:\n debug_print(str(e))\n \n if not pid:\n debug_print(\"Could not find %s PID\" % process_name)\n \n return pid", "def find_match(name, dictionary):\n if name == '':\n # raise \"Didn't find name\"\n return False\n search_name = (' ').join(name.split(' ')[:-1])\n if search_name in dictionary:\n return search_name\n else:\n return find_match(search_name, dictionary)", "def findMatchingNames(regname, map):\n list = []\n regname += \"$\"\n\n # Find the existing items that match this string\n\n for name in map:\n regexp = re.compile(regname).match(name)\n if regexp:\n list.append(regexp)\n\n return list", "def process_strings(self):\n for string in self.input:\n matcher = self.choose_algorithm()\n matcher.find_match(string, self.case_insensitive)\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n if self.__results:\n self.output(string)", "def get_filtered_pids(filterstr, excludes=None):\n excludes = excludes or []\n cmd = \"ps ax | grep '%s'\" % filterstr\n rc, out, err = j.core.executors.run_local(cmd)\n # print out\n found = []\n\n def checkexclude(c, excludes):\n for item in excludes:\n c = c.lower()\n if c.find(item.lower()) != -1:\n return True\n return False\n\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != -1:\n line = line.strip()\n if not checkexclude(line, excludes):\n # print \"found pidline:%s\"%line\n found.append(int(line.split(\" \")[0]))\n return found", "def process_exists(pid=None, name=None):\n\n return count_processes(pid, name) > 0", "def checkIfProcessRunning(processName):\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False", "def checkIfProcessRunning(processName):\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if processName.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False", "def __getitem__(self, name):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n try:\n r = processes[name]\n except KeyError:\n r = None # no such process called 'name'\n\n return r", "def __getitem__(self, name):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n try:\n r = processes[name]\n except KeyError:\n r = None # no such process called 'name'\n\n return r", "def get_procs_count(proc_name):\n procs = subprocess.check_output(['ps','-ef']).splitlines()\n name_procs = [proc for proc in procs if proc_name.encode() in proc]\n return len(name_procs)", "async def remove_process(self, ctx, *name):\n print(name)\n name = self.fix_emoji_escapes(\" \".join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been removed\")\n complete = True\n break\n if not complete:\n await 
ctx.send(f\"The process {name} doesn't exist\")", "def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes", "def restart_pid_check(service_name, ptable_string=None):\n\n @retry_on_exception(5, base_delay=3, exc_type=AssertionError)\n def check_pids_gone(svc_string):\n log(\"Checking no pids for {} exist\".format(svc_string), level=INFO)\n assert(subprocess.call([\"pgrep\", svc_string]) == 1)\n\n if not ptable_string:\n ptable_string = service_name\n service_stop(service_name)\n check_pids_gone(ptable_string)\n service_start(service_name)", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def _parseProcessNames(self, results):\n self.state = ZenProcessTask.STATE_PARSING_TABLE_DATA\n\tif not AS400PLUG in self._device.zCollectorPlugins:\n\t\tif not results or not results[NAMETABLE]:\n \t\traise HostResourceMIBExecption()\n\telse:\n\t\tif not results or not results[AS400NAME]:\n \t\traise HostResourceMIBExecption()\n\n\n if self._preferences.options.captureFilePrefix:\n self.capturePacket(self._devId, results)\n\n showrawtables = self._preferences.options.showrawtables\n args, procs = mapResultsToDicts(showrawtables, results)\n if self._preferences.options.showprocs:\n self._showProcessList(procs)\n return procs", "def bonenamematch(name1, name2):\n if name1 == name2:\n return True\n if name1.startswith(\"Bip01 L \"):\n name1 = \"Bip01 \" + name1[8:] + \".L\"\n elif name1.startswith(\"Bip01 R \"):\n name1 = \"Bip01 \" + name1[8:] + \".R\"\n if name2.startswith(\"Bip01 L \"):\n name2 = \"Bip01 \" + name2[8:] + \".L\"\n elif name2.startswith(\"Bip01 R \"):\n name2 = \"Bip01 \" + name2[8:] + \".R\"\n if name1 == name2:\n return True\n return False", "def find_program(name):\r\n # See MSDN for the REAL search order.\r\n base, ext = os.path.splitext(name)\r\n if ext:\r\n exts = [ext]\r\n else:\r\n exts = ['.bat', '.exe']\r\n for directory in os.environ['PATH'].split(os.pathsep):\r\n for e in exts:\r\n fname = os.path.join(directory, base + e)\r\n if os.path.exists(fname):\r\n return fname\r\n return None", "def match(self, string):\n ary = string.split(' ', len(self.matchers))\n if all(m(a) for m, a in zip(self.matchers, ary)):\n return ary", "def _determineProcessStatus(self, procs):\n beforePids = set(self._deviceStats.pids)\n afterPidToProcessStats = {}\n pStatsWArgsAndSums, pStatsWoArgs = self._splitPStatMatchers()\n for pid, (name, 
psargs) in procs:\n pStats = self._deviceStats._pidToProcess.get(pid)\n if pStats:\n # We saw the process before, so there's a good\n # chance that it's the same.\n if pStats.match(name, psargs):\n # Yep, it's the same process\n log.debug(\"Found process %d on %s, matching %s %s with MD5\",\n pid, pStats._config.name, name, psargs)\n log.debug(\"%s found existing stat %s %s for pid %s - using MD5\", self._devId, pStats._config.name,\n pStats._config.originalName, pid)\n afterPidToProcessStats[pid] = pStats\n continue\n\n elif pStats.match(name, psargs, useMd5Digest=False):\n # In this case, our raw SNMP data from the\n # remote agent got futzed\n # It's the same process. Yay!\n log.debug(\"%s - Found process %d on %s, matching %s %s without MD5\",\n self._devId, pid, pStats._config.name, name, psargs)\n afterPidToProcessStats[pid] = pStats\n continue\n\n # Search for the first match in our list of regexes\n # that have arguments AND an MD5-sum argument matching.\n # Explicitly *IGNORE* any matchers not modeled by zenmodeler\n for pStats in pStatsWArgsAndSums:\n if pStats.match(name, psargs):\n log.debug(\"%s Found process %d on %s %s\",\n self._devId, pid, pStats._config.originalName, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n else:\n # Now look for the first match in our list of regexes\n # that don't have arguments.\n for pStats in pStatsWoArgs:\n if pStats.match(name, psargs, useMd5Digest=False):\n log.debug(\"Found process %d on %s\",\n pid, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n\n afterPids = set(afterPidToProcessStats)\n afterByConfig = reverseDict(afterPidToProcessStats)\n newPids = afterPids - beforePids\n deadPids = beforePids - afterPids\n\n restarted = {}\n for pid in deadPids:\n procStats = self._deviceStats._pidToProcess[pid]\n procStats.discardPid(pid)\n if procStats in afterByConfig:\n ZenProcessTask.RESTARTED += 1\n pConfig = procStats._config\n if pConfig.restart:\n restarted[procStats] = pConfig\n\n # Now that we've found all of the stragglers, check to see\n # what really is missing or not.\n missing = []\n for procStat in self._deviceStats.processStats:\n if procStat not in afterByConfig:\n missing.append(procStat._config)\n\n # For historical reasons, return the beforeByConfig\n beforeByConfig = reverseDict(self._deviceStats._pidToProcess)\n\n return (afterByConfig, afterPidToProcessStats,\n beforeByConfig, newPids, restarted, deadPids,\n missing)", "def check_process_exist(process_name): \n returncode = '' \n try:\n p=os.popen('tasklist /FI \"IMAGENAME eq %s\"' % process_name) \n returncode = p.read().count(process_name) \n if returncode:\n initlog('%s exists' % process_name)\n except Exception, e:\n initlog(str(e)) \n return returncode", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def __GetCurrentProcessName(self, G, node):\n prName = \"\"\n \n # Get targets of \"in relations\"\n relsIn = self.GetInOutRelationsForList(G, node, [Strings.in_, Strings.tool])\n inHolders = [rel[1] for rel in relsIn[gc.OutgoingRelations][Strings.in_]]\n \n # Get targets of \"requires\" relations\n reqTargets = set()\n for rel in relsIn[gc.InputRelations][Strings.tool]:\n reqTargets = reqTargets.union(set([r[1] for r in self.GetInOutRelationsForList(G, rel[0], [Strings.req])[gc.OutgoingRelations][Strings.req]]))\n\n # Do a search through \"in\" links\n while len(inHolders) and len(prName) == 0:\n nestedRels = self.GetInOutRelationsForList(G, inHolders[0], [Strings.is_, 
Strings.in_])\n isNames = [rel[1] for rel in nestedRels[gc.OutgoingRelations][Strings.is_]]\n \n # Once Process is met, return\n if Strings.ndProcess in isNames and inHolders[0] in reqTargets:\n prName = inHolders[0]\n else:\n inHolders += [rel[1] for rel in nestedRels[gc.OutgoingRelations][Strings.in_]]\n del inHolders[0]\n\n return prName", "def get_process_name(pid):\n proc = subprocess.Popen(['ps', '-p', pid, '-o', 'comm='],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err=proc.communicate()\n return out.strip().decode('utf-8')", "def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"", "def _regex_comp(self, name, flist):\n if name in flist:\n return True\n for item in flist:\n p = re.compile(item)\n match = p.match(name)\n if (match is not None):\n return True\n return False", "async def find_processes(self, msg):\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)", "def get_matches_commandline_with_children(self, match_pattern):\n\n matched_pids = self.get_matches_commandline(match_pattern)\n for matched_pid in matched_pids:\n matched_pids.extend(self.get_child_processes(matched_pid))\n return list(set(matched_pids))", "def get_a_local_process(self, proc_name=''):\n for p in self.procs.itervalues():\n\n if p.name == proc_name:\n return p\n\n if p.process_type == AGENT_PROCESS_TYPE and p.resource_type == proc_name:\n return p\n\n return None", "def split_name(process_name):\n identifier, box_name = process_name.split(\"-\")\n identifier = int(identifier)\n if Ibox.itersep in box_name:\n box_exec_name = box_name.split(\".\")[0]\n box_iter_name, iteration = box_exec_name.split(Ibox.itersep)\n iteration = int(iteration)\n else:\n box_exec_name = None\n box_iter_name = None\n iteration = None\n return identifier, box_name, box_exec_name, box_iter_name, iteration", "def psa(line):\n from stefco.get_terminal_size import get_terminal_size\n import textwrap\n cmd, paths = _cmd_path_lex(line)\n pids, cmds, procs = _psa(cmd, allmatching=True, paths=paths)\n print(\"Matching processes:\\nPID\\tCOMMAND\\n\" + 80*\"~\" + \"\\n\\n\")\n procdict = dict()\n termwidth = get_terminal_size().columns\n for i, pid in enumerate(pids):\n procdict[pid] = procs[i]\n wrappedcmd = textwrap.wrap(str(cmds[i]), width=(termwidth - 8))\n # print pid on first line of command\n print(\"{}\\t{}\".format(pid, wrappedcmd.pop(0)))\n # print any remaining lines of the command\n if not len(wrappedcmd) == 0:\n print(\"\\t\" + \"\\n\\t\".join(wrappedcmd))\n # print an extra blank line after each process\n print(\"\")\n return procdict", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n 
if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def _find_running_exe(exe):\n candidates = []\n exe = path.abspath(exe)\n for proc in _get_process_list():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'exe'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"exe\"] and pinfo['exe'] == exe:\n candidates.append(pinfo['pid'])\n return candidates", "def matcher(string):\n rec = re.compile(rexp, re.VERBOSE)\n groups = set(rec.groupindex) # index nos of no interest; discard\n m = rec.search(string)\n if m is None: return None\n # Match succeeded at this point\n # match-data -> Python\n mapped_d = {gname : m.group(gname) for gname in groups}\n # postprocess and done!\n return {k : ppers[k](mapped_d[k]) for k in mapped_d}", "def test_get_application_process(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n 
self.supervisor.supvisors.context.applications = {\n 'appli_1': 'first application'}\n self.supervisor.supvisors.context.processes = {\n 'appli_1:proc_1': 'first process'}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with full namespec\n self.assertTupleEqual(('first application', 'first process'),\n rpc._get_application_process('appli_1:proc_1'))\n # test with applicative namespec\n self.assertTupleEqual(('first application', None),\n rpc._get_application_process('appli_1:*'))", "def procExists(self, procname):\n\n proclist = self.getList( 'proclist' ) # safely get copy of process list\n\n count = 0 # count number of occurrences of 'procname'\n for i in proclist:\n if i.procname == procname:\n count = count + 1\n\n return count", "def check_ambari_server_process_down(self):\n process_name = \"ambari-server\"\n output = self.__find_process(process_name)\n return re.search(process_name, output) is None", "def search(name):\n try:print(f'Searching for {name}...');os.system(f'python -m pip search {name}')\n except Exception as e:print(\"something went wrong\\n{e}\")", "async def _exact_search(self, name, splits_list=None): \n\n # Generates a list if not provided \n if splits_list is None:\n splits_list = await self._get_all_splits()\n\n # Returns index of name, if not found returns -1\n for i in range(len(splits_list)):\n if (splits_list[i][0] == name) and (splits_list[i][1] is not None):\n return i\n return -1", "def fromName(name):\n matches = [nn for nn in instances if nn.name == name]\n if len(matches) != 1:\n raise Exception(\n \"Too many or too few ({}) matches for {}\" \"\".format(len(matches), name)\n )\n return matches[0]", "def find_script(partname, scriptlist=None):\n\n # Helper for scripts that start with 'wp'\n if (partname.startswith('wp') or partname.startswith('^')):\n wpname = None\n else:\n wpname = 'wp{}'.format(partname)\n # Helper for scripts that start with 'update'\n if (partname.startswith('update') or partname.startswith('^')):\n updatename = None\n else:\n updatename = 'update{}'.format(partname)\n\n # only match switches beginning with partname to avoid ambiguous names.\n if not partname.startswith('^'):\n partname = '^{}'.format(partname)\n # compile search pattern into regex\n try:\n # Try compiling a 'wp' pattern if needed\n if wpname:\n repatwp = re.compile('^{}'.format(wpname))\n else:\n repatwp = None\n\n # Try compiling an 'update' pattern if needed.\n if updatename:\n repatupdate = re.compile('^{}'.format(updatename))\n else:\n repatupdate = None\n # Compile the original pattern.\n repat = re.compile(partname)\n except Exception as expat:\n print_fail('Invalid name given!: {}'.format(partname), exc=expat)\n\n # Load scripts if needed\n if scriptlist is None:\n scriptlist = get_scripts()\n if not scriptlist:\n print_fail('\\nUnable to find any scripts!')\n\n # Search scripts\n for scriptpath in scriptlist:\n script = os.path.split(scriptpath)[-1]\n # Match with or without wp\n rematch = repat.search(script)\n if rematch:\n return scriptpath\n else:\n if repatwp:\n # try wpname\n rematch = repatwp.search(script)\n if rematch:\n return scriptpath\n if repatupdate:\n # try updatescript\n rematch = repatupdate.search(script)\n if rematch:\n return scriptpath\n\n # No match\n return None", "def ProcessIterator(pids, process_regex_string, cmdline_regex_string,\n ignore_grr_process, error_list):\n pids = set(pids)\n if ignore_grr_process:\n grr_pid = psutil.Process().pid\n else:\n grr_pid = -1\n\n if process_regex_string:\n process_regex = 
re.compile(process_regex_string)\n else:\n process_regex = None\n\n if cmdline_regex_string:\n cmdline_regex = re.compile(cmdline_regex_string)\n else:\n cmdline_regex = None\n\n if pids:\n process_iterator = []\n for pid in pids:\n try:\n process_iterator.append(psutil.Process(pid=pid))\n except Exception as e: # pylint: disable=broad-except\n error_list.Append(\n rdf_memory.ProcessMemoryError(\n process=rdf_client.Process(pid=pid), error=str(e)))\n else:\n process_iterator = psutil.process_iter()\n\n for p in process_iterator:\n if process_regex and not process_regex.search(p.name()):\n continue\n\n if cmdline_regex and not cmdline_regex.search(\" \".join(p.cmdline())):\n continue\n\n if p.pid == grr_pid:\n continue\n\n yield p", "def count_processes(pid=None, name=None):\n counter = 0\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if ((pid is None or process.Properties_(\"ProcessID\").Value == pid) and\n (name is None or process.Properties_(\"Name\").Value == name)):\n counter += 1\n return counter", "def get_param_re_names(self, param_name_re, syselem):\n\n with self.__connection.cursor() as cursor:\n query = \"SELECT PID FROM %s WHERE NAME LIKE '%s' AND SYSTEM_ELEMENT= '%s'\" % (self.__schema, param_name_re, syselem)\n cursor.execute(query)\n result = cursor.fetchone()\n return result['PID']", "def matches(self, executable, title, handle):\n return True", "def first_part_pid(self,text,pid):\n\n len_max=4\n key_list=pid.keys()\n while 1:\n num=min(len_max,len(text))\n if len_max==0:\n sys.exit('error pid dico not complete or invalid input :'+str([text[:min(3,len(text))]])+'\\\n \\n Complete proc_info.py')\n \n if text[:num].lower() in key_list:\n tag=text[:num].lower()\n text=text[num:]\n return text, pid[tag]\n else:\n len_max+=-1", "def pfind(pid):\n for p in list_foreach(\"allproc\", \"p_list\"):\n if p['p_pid'].cast(gdb.lookup_type(\"int\")) == pid:\n return p\n raise gdb.error(\"No process with pid {} exists\".format(pid))", "def renice(self,process_list,level):\n res = []\n pids = {}\n for process in process_list:\n if hasattr(process,'machine'):\n try:\n worker = self.worker_by_name[process.machine]\n except KeyError:\n worker = self.worker_by_name[process.long_machine]\n pid = process.pid\n else:\n worker = self.workers[process[0]]\n pid = process[1]\n try:\n pids[worker] = pids[worker] + ' ' + str(pid)\n except:\n pids[worker] = str(pid)\n for worker,value in pids.items():\n arg = 'renice %d -p %s' % (level,value)\n res.append(worker.apply(os.system,(arg,)))\n return res", "def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END", "def find_executable(name, paths):\n for path in paths:\n full_path = os.path.join(path, name)\n if os.path.isfile(full_path):\n return full_path\n # If not found, just assume it's in the PATH.\n return name", "def find_item_by_name(list_, namegetter, name):\n matching_items = [i for i in list_ if namegetter(i) == name]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name) + '$', re.IGNORECASE)\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if 
prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n return matching_items", "def match_specific_name(name: str, specific_names: list) -> str:\n c = clean_specific_name(name)\n if c == \"\":\n return c\n else:\n y = \"\"\n for x in specific_names:\n matchlist = x.variations.split(\";\")\n if c in matchlist:\n y = x.name\n return y", "def lookForQueueingCommands():\n for queue, binary in queueBinaryMap.items():\n if checkForBinary(binary):\n return queue\n else:\n raise Exception(\"Cannot locate a queueing system. None of these executables were found in your PATH: %s\" % (queueBinaryMap.values(),))", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()" ]
[ "0.67669374", "0.6710715", "0.66695976", "0.64684063", "0.64647824", "0.6255509", "0.60690784", "0.6056705", "0.60546", "0.595144", "0.59447294", "0.5906793", "0.5887537", "0.58319217", "0.58199763", "0.57570976", "0.5753294", "0.572602", "0.57223743", "0.5722138", "0.5618653", "0.5584384", "0.55766284", "0.55507064", "0.5537412", "0.5515952", "0.54602623", "0.544717", "0.5399602", "0.5394458", "0.53901285", "0.5388189", "0.53801703", "0.5360252", "0.53542805", "0.5288566", "0.5285713", "0.52752906", "0.5260171", "0.5221359", "0.520435", "0.5166715", "0.5146207", "0.511513", "0.5114305", "0.5101754", "0.5071015", "0.50602907", "0.5058819", "0.5056082", "0.50518066", "0.50511444", "0.50511444", "0.50402915", "0.50402915", "0.50375843", "0.50323826", "0.50286835", "0.5020249", "0.5018165", "0.5009922", "0.50007135", "0.4997392", "0.4992876", "0.49747574", "0.49703586", "0.49424526", "0.49358118", "0.49344513", "0.49328366", "0.49167526", "0.49134514", "0.49121982", "0.48997656", "0.48932317", "0.48917794", "0.48701176", "0.48597485", "0.484704", "0.48443124", "0.48254985", "0.48055777", "0.4803746", "0.4795139", "0.478386", "0.47818872", "0.4779897", "0.47793984", "0.47783822", "0.47769228", "0.47723013", "0.4762578", "0.47623327", "0.47618937", "0.47546086", "0.47488505", "0.47286424", "0.47284922", "0.47039154", "0.46996436" ]
0.611642
6
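A compact sketch of the name-matching pattern that recurs in the snippets above: collect the pids whose process name or command line contains a given string. It assumes psutil is available and is used in place of parsing ps/tasklist output; the helper name pids_matching is illustrative, not taken from the dataset.

import psutil


def pids_matching(name):
    # Collect pids whose process name or command line contains `name`
    # (case-insensitive); processes that vanish mid-iteration are skipped.
    matches = set()
    needle = name.lower()
    for proc in psutil.process_iter(attrs=["pid", "name", "cmdline"]):
        try:
            haystack = " ".join([proc.info["name"] or ""] + (proc.info["cmdline"] or []))
            if needle in haystack.lower():
                matches.add(proc.info["pid"])
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return sorted(matches)


if __name__ == "__main__":
    print(pids_matching("python"))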
Given a process id, return all child processes (recursively)
def get_child_processes(self, ppid): all_children = [] children_to_explore = set() for _pid in self.parent_to_children_map[ppid]: all_children.append(_pid) children_to_explore.add(_pid) # get the children 'recursively' while children_to_explore: # the invariant child_to_explore = children_to_explore.pop() if not self.parent_to_children_map.get(child_to_explore): continue unvisited = self.parent_to_children_map[child_to_explore] for node in unvisited: if node not in all_children: children_to_explore.add(node) all_children.append(node) return list(set(all_children))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids", "def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]", "def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def Children( cls, pid ):\n\t\tres = []\n\t\tpid = int(pid)\n\t\tfor cpid, cmd in cls.List().items():\n\t\t\tppid = int(cls.Status(cpid)[\"ppid\"])\n\t\t\tif ppid == pid:\n\t\t\t\tres.append( (cpid, None, cmd))\n\t\treturn res", "def children_of(self, pid, all=False):\r\n self._raise_unless_has_pid(pid)\r\n if all:\r\n all_children = set()\r\n self._calculate_children(pid, all_children)\r\n return all_children\r\n else:\r\n return copy(self._pid_to_children[pid])", "def kill_process_children(pid):\n root_process_path = \"/proc/{pid}/task/{pid}/children\".format(pid=pid)\n if not os.path.isfile(root_process_path):\n return\n with open(root_process_path) as children_list_file:\n children_list_pid = children_list_file.read().split()\n\n for child_pid in children_list_pid:\n children_proc_path = \"/proc/%s/task/%s/children\" % (\n child_pid,\n child_pid,\n )\n if not os.path.isfile(children_proc_path):\n continue\n with open(children_proc_path) as children_list_file_2:\n children_list_pid_2 = children_list_file_2.read().split()\n for _pid in children_list_pid_2:\n try:\n os.kill(int(_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue\n try:\n os.kill(int(child_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue", "def getChildPIDs(self):\n\t\treturn self.pids", "def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents", "def collect_children(self):\n\t\twhile self.active_children:\n\t\t\tif len(self.active_children) < self.max_children:\n\t\t\t\toptions = os.WNOHANG\n\t\t\telse:\n\t\t\t\t# If the maximum number of children are already\n\t\t\t\t# running, block while waiting for a child to exit\n\t\t\t\toptions = 0\n\t\t\ttry:\n\t\t\t\tpid, status = 
os.waitpid(0, options)\n\t\t\texcept os.error:\n\t\t\t\tpid = None\n\t\t\tif not pid: break\n\t\t\tself.active_children.remove(pid)", "def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)", "def do_select_children(self, node_id):\n try:\n _children = self.tree.children(node_id)\n except NodeIDAbsentError:\n _children = None\n\n return _children", "async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)", "def get_child_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"parent_of\"}\n )\n except Exception, e:\n return {}\n\n children = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n child = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n children.append(child)\n except:\n pass\n return children", "def get_matches_commandline_with_children(self, match_pattern):\n\n matched_pids = self.get_matches_commandline(match_pattern)\n for matched_pid in matched_pids:\n matched_pids.extend(self.get_child_processes(matched_pid))\n return list(set(matched_pids))", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes", "def kill_children(timeout=1) -> List[psutil.Process]:\n procs = child_manager.children_pop_all()\n for p in procs:\n try:\n p.terminate()\n except psutil.NoSuchProcess:\n pass\n gone, alive = psutil.wait_procs(procs, timeout=timeout)\n for p in alive:\n logger.warning(\"Cleaning up child: %d\", p.pid)\n p.kill()\n return alive", "def get_jobs_by_process_id(self, process_id):\n\n jobs = list()\n for job in Job.objects.filter(process=process_id):\n jobs.append(job)\n return jobs", "def get_processes():\n yield from psutil.process_iter()", "def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n (FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n 
output[right] = process[left]\n\n return output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}", "def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. 
Just return the cached version.\r\n yield proc", "def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids", "def get_children(self, node_id: np.uint64) -> np.ndarray:\n children = self.read_row(node_id, \"children\", dtype=np.uint64)\n\n if children is None:\n return np.empty(0, dtype=np.uint64)\n else:\n return children", "def find_child_containers(self, parent_id: str) -> list:\n try:\n return self.docker.containers.list(\n filters={\n 'label': f'{LABEL_PARENT_ID}={parent_id}',\n },\n )\n\n except requests.exceptions.ConnectionError:\n raise ProviderError('Docker engine unavailable')", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def processes(self):\n nodes = (self.nodes.exclude(process__isnull=True)\n .values_list('process_id', flat=True))\n return Process.objects.filter(id__in=nodes).distinct()", "def children(self):\n if self._pedigree is None:\n raise Exception(\"Pedigree is not defined\")\n return [self._pedigree.individual(pid) for pid in sorted(self._children_ids, key=self._sort_by_birth)]", "def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")", "def get_serials_by_child_recid(recid):\n search = SeriesSearch().query(\n 'bool',\n filter=[\n Q('term', mode_of_issuance='SERIAL'),\n Q('term', _migration__children=recid),\n ]\n )\n for hit in search.scan():\n yield Series.get_record_by_pid(hit.pid)", "def reap_children(children, config, logger):\n to_delete = []\n current_time = time.time()\n for eventid, info in children.items():\n returncode = info['popen'].poll()\n if returncode is not None:\n logger.info('Reaped child for event %s (return code %d)' %\n (eventid, returncode))\n to_delete.append(eventid)\n continue\n #\n # Kill children who take too long\n #\n if info['start_time'] + config['max_process_time'] < current_time:\n logger.warning('Event %s taking too long, killing' % eventid)\n info['popen'].kill()\n info['popen'].wait()\n logger.warning('Reaped child 
for killed event %s' % eventid)\n to_delete.append(eventid)\n\n for eventid in to_delete:\n del children[eventid]\n\n return", "def get_running_processes(self):\n\n all_processes = []\n for _process in self.processes:\n all_processes.append(_process[\"pid\"])\n return all_processes", "def children(self, path):\n url = u'/'.join(\n [self.conf[\"api\"], \"path\", escape_path(path).strip('/'), \"@children\"])\n params = {}\n self.logger.info(path)\n self.logger.debug(url)\n return self._get_iter(url, params)", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def remove_child_nodes(self, id):\r\n children = self.get_node_by_id(id).children\r\n self.nodes = [ n for n in self.nodes if n.id!=id ]\r\n if len(children)>0:\r\n for c in children:\r\n self.remove_child_nodes(c.id)", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def children_recursive(self, i):\n result = []\n for child in self.children(i):\n result += [child] + self.children_recursive(child)\n return result", "def children_ids(self):\n return self._children_ids", "def get_asset_children(self, asset_id):\n endpoint = '/assets/{}/children'.format(asset_id)\n return self._api_call('get', endpoint)", "def terminate_process_and_children(self, name):\n if name not in self.jobs:\n print(\"[%s] does not exist as a process!\", name)\n ppid = self.jobs[name]['process'].pid\n try:\n parent_proc = psutil.Process(ppid)\n except psutil.NoSuchProcess:\n return\n children = parent_proc.children(recursive=True)\n for proc in children:\n l.debug(proc)\n try:\n proc.send_signal(signal.SIGKILL)\n except:\n pass", "def kill_proc_tree(pid, including_parent=True):\n parent = psutil.Process(pid)\n for child in parent.children(recursive=True):\n child.kill()\n if including_parent:\n parent.kill()", "def get_all_children(self):\n all_children = set()\n for parent in self.children:\n all_children.add(parent.id)\n all_children |= parent.get_all_children()\n return all_children", "def pids(self):\n return self._pidToProcess.iterkeys()", "def get_child_ids(cur, node):\n sql = \"\"\"\n SELECT\n id\n FROM\n nodes\n WHERE\n parent=%s\n ORDER BY\n position;\n \"\"\"\n cur.execute(sql, (str(node), ))\n for result in cur:\n yield str(result['id'])", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def destroy_children(self, parent_id: str) -> list:\n try:\n children = self.find_child_containers(parent_id)\n\n tasks = []\n for child in children:\n tasks += self.destroy(child.labels[LABEL_TASK_ID])\n\n return tasks\n\n except requests.exceptions.ConnectionError:\n raise ProviderError('Docker engine unavailable')", "def kill(pids):\n for pid in pids:\n process = psutil.Process(pid)\n for proc in process.children(recursive=True):\n proc.kill()\n process.kill()\n return", "def getChildren(self, path):\n \n self._sharedState.lock.acquire()\n try:\n try:\n self.update(path)\n children = list()\n entries = self._client.list(self._workingCopyPath + path, recurse=False)\n for entry in entries:\n entryPath = entry[0].path[self._workingPathLength:]\n formerEntry = self._sharedState.getFromCache(path)\n if formerEntry is None:\n newEntry = _Info(entry[0])\n else:\n newEntry = _Info(entry[0])\n newEntry.logMessage = formerEntry.logMessage # creation date and owner do not change\n self._sharedState.addToCache(entryPath, newEntry)\n 
children.append(entryPath)\n del children[0] # First item is always the queried path\n return children\n except ClientError, error:\n raise SubversionError(error)\n finally:\n self._sharedState.lock.release()", "def get_children(self):\r\n\r\n if not self.has_children:\r\n return []\r\n\r\n if getattr(self, '_child_instances', None) is None:\r\n self._child_instances = [] # pylint: disable=attribute-defined-outside-init\r\n for child_loc in self.children:\r\n try:\r\n child = self.runtime.get_block(child_loc)\r\n child.runtime.export_fs = self.runtime.export_fs\r\n except ItemNotFoundError:\r\n log.exception(u'Unable to load item {loc}, skipping'.format(loc=child_loc))\r\n continue\r\n self._child_instances.append(child)\r\n\r\n return self._child_instances", "def kill_child_processes(parent_pid, sig=signal.SIGTERM):\n try:\n parent = psutil.Process(parent_pid)\n except psutil.NoSuchProcess:\n return\n children = parent.children(recursive=True)\n for process in children:\n try:\n process.send_signal(sig)\n except psutil.NoSuchProcess:\n return", "def get_nodes_for_process(self, uuid, clean=True):\n if clean:\n uuid = Process.strip_uuid(uuid)\n return self._get_tree_queryset().filter(process__uuid_full__startswith=uuid)", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def children(self):\n return self.hashring_watch.get_children()", "def children(self, node):\n for child_id, _ in self.edges[node.identifier].items():\n yield self._id2node[child_id]", "def fork_children(self, target, amount):\n for _ in range(amount):\n p = Process(target=target)\n p.start()", "def _get_children(self, x):\n try:\n return x._pfp__children\n\n except AttributeError:\n return []", "def get_child_tasks(self) -> List[\"TaskNode\"]:\n r = self.agent_memory.get_triples(pred_text=\"_has_parent_task\", obj=self.memid)\n memids = [m for m, _, _ in r]\n return [TaskNode(self.agent_memory, m) for m in memids]", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def 
get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list", "def get_all_children_seq(self):\n results = []\n queue = []\n children = self.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = node.get_immediate_children()\n results.extend(children)\n queue.extend(children)\n return results", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def get_children(self):\n children = []\n for i in self.children_ids:\n child = Comment(self.articleID, self.children_ids[i])\n children.append(child)\n children.extend(child.get_children())\n return children", "def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in 
names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def immediate_children( path ):\n assert( os.path.isdir( path ) )\n CMD = [ \"find\", path, \"-mindepth\", \"1\", \"-maxdepth\", \"1\" ]\n return [ x for x in run_cmd( CMD ).split( \"\\n\" ) if len( x ) > 0 ]", "def pids(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/pids\" % (\n self.sessionid, self.name))\n result = self.server.json_body(resp)\n return result['pids']", "def get_children_by_frontend_id(\n message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)\n):\n pr = PromptRepository(db, api_client)\n message = pr.fetch_message_by_frontend_message_id(message_id)\n messages = pr.fetch_message_children(message.id, review_result=None)\n return utils.prepare_message_list(messages)", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def getChildren(self):\n if not self.Children:\n\n #print 'reached leave node {0}'.format(self.CommID)\n #raw_input()\n return [[], []]\n\n children = deque()\n parent = deque()\n for c in range(len(self.Children)):\n children.append(self.Children[c])\n parent.append(self.CommID)\n retval = (children, parent)\n\n #print 'retval of ID {0} is {1}'.format(self.CommID, retval)\n #raw_input('wait')\n\n return retval", "def collect_children_by_id(self):\n self.children_by_id = {}\n self.root_by_id = {}\n self.ns_for_root_id = {}\n\n def recursive_fill_root_id(entry):\n root_id = self.root_by_id.get(entry.mount_id)\n if root_id is not None:\n return root_id\n\n if entry.parent_id == entry.mount_id:\n # self-referencing is a root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n parent_entry = self.items.get(entry.parent_id)\n if parent_entry is None:\n # The parent is unknown, so it is an implicit root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n root_id = recursive_fill_root_id(parent_entry)\n self.root_by_id[entry.mount_id] = root_id\n return root_id\n\n for entry in self.items.values():\n if entry.parent_id not in self.children_by_id:\n self.children_by_id[entry.parent_id] = {}\n self.children_by_id[entry.parent_id][entry.mount_id] = entry.abs_mount_point(no_question=True)\n root_id = recursive_fill_root_id(entry)\n if root_id not in self.ns_for_root_id:\n self.ns_for_root_id[root_id] = set()\n self.ns_for_root_id[root_id].add(entry.mount_ns)\n\n # Sanity check\n assert len(self.items) == len(self.root_by_id)", "def get_children(self):\n return self.children", "def get_children(obj):\n ret = obj.to_dict()\n if obj.children.all():\n 
ret.__setitem__('children',[get_children(j) for j in obj.children.all()])\n return ret", "def _process_children(self, node):\n for kid in node.children:\n self._process_node(kid)", "def Kill(cls, pid, children=False):\n\t\tif pid is not None:\n\t\t\tif children:\n\t\t\t\tfor cpid, _, cmd in cls.Children(pid):\n\t\t\t\t\t# We need to recursively kill the childrens\n\t\t\t\t\tcls.Kill(cpid, children=True)\n\t\t\tLogger.Info(\"Killing process: \" + repr(pid))\n\t\t\treturn popen(\"kill -9 %s\" % (pid))\n\t\telse:\n\t\t\treturn None", "def get_process_detail_expanded(self, _id: str):\n url = self._get_url(subpath=\"processes\", route=\"detail_expanded\", template_args={\"id\": _id})\n response = self.session.get(url)\n return response", "def get_children(self, refobj):\n children = cmds.listConnections(\"%s.children\" % refobj, d=False)\n if not children:\n children = []\n return children", "def getChildren(self):\n return self.directories.values()", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def _getChildrenBom(self, component, level=0, currlevel=0):\n result = []\n bufferdata = []\n if level == 0 and currlevel > 1:\n return bufferdata\n for bomid in component.product_tmpl_id.bom_ids:\n for bomline in bomid.bom_line_ids:\n children=self._getChildrenBom(bomline.product_id, level, currlevel+1)\n bufferdata.extend(children)\n bufferdata.append(bomline.product_id.id)\n result.extend(bufferdata)\n return getCleanList(result)", "def find_children(self):\r\n for i in range(len(self.vertices)):\r\n self.vertices[i].children = []\r\n for i in range(len(self.vertices)):\r\n for parent in self.vertices[i].parents:\r\n if i not in self.vertices[parent].children:\r\n self.vertices[parent].children.append(i)", "def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output", "def findChildren(self, name):\n\n # Note: this returns a list of all the children of a given\n # name, irrespective of the depth of look-up.\n \n children = []\n \n for child in self.getAllChildren():\n if child.getName() == name:\n children.append(child)\n\n return children", "def computeChildren(self, root):\n d = deque()\n bag = set()\n d.append(root)\n while d:\n elem = d.pop()\n bag.add(elem)\n newElems = set(elem.children).difference(bag)\n d.extend(newElems)\n return bag", "def get_childs(self):\n\t\treturn self.__childs", "def GetChildren(self, p_int, p_int_1, p_int_2):\n ...", "def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result", "def get_children(self):\r\n return self.children", "def get_child_ids(forum):\n forum_ids = [forum.id]\n if forum.children:\n for child in forum.children:\n forum_ids.extend(\n get_child_ids(child) # Get the children from the children\n )\n return forum_ids", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children" ]
[ "0.8440308", "0.78711176", "0.7738845", "0.73896194", "0.73563373", "0.7110651", "0.6810038", "0.66680056", "0.651955", "0.6309715", "0.62305254", "0.603989", "0.59731925", "0.5933038", "0.5905366", "0.5880942", "0.5819915", "0.57845265", "0.5777258", "0.5750268", "0.574295", "0.57289284", "0.57132065", "0.56682825", "0.56626683", "0.5651312", "0.5644418", "0.5639618", "0.56385756", "0.56256604", "0.5606049", "0.55820084", "0.55811286", "0.5571847", "0.55650103", "0.5564425", "0.5543691", "0.55366236", "0.55362964", "0.5532378", "0.5530986", "0.5521211", "0.55208915", "0.5512525", "0.54975563", "0.5465949", "0.54575527", "0.5433819", "0.5422512", "0.5405289", "0.53999937", "0.53929746", "0.5391013", "0.5387276", "0.5385112", "0.538408", "0.5364776", "0.53577876", "0.5353725", "0.532983", "0.53256166", "0.5318878", "0.53180283", "0.53167915", "0.53166866", "0.53118014", "0.52981484", "0.5284065", "0.52807665", "0.52787805", "0.52651685", "0.5253055", "0.5249061", "0.5247258", "0.52406424", "0.5232152", "0.52244115", "0.52244115", "0.52007186", "0.5193189", "0.51829475", "0.5182161", "0.51599115", "0.5152294", "0.5151806", "0.51516604", "0.5140498", "0.51381487", "0.51344806", "0.5133333", "0.51249456", "0.5119732", "0.51184374", "0.5111192", "0.5110835", "0.5102654", "0.509831", "0.5081358", "0.5081358", "0.5081358" ]
0.7898279
1
Returns a list of all running process ids
def get_running_processes(self): all_processes = [] for _process in self.processes: all_processes.append(_process["pid"]) return all_processes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def pids(self):\n return self._pidToProcess.iterkeys()", "def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]", "def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n 
assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def pids(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/pids\" % (\n self.sessionid, self.name))\n result = self.server.json_body(resp)\n return result['pids']", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def pids(self):\r\n return copy(self._pids)", "def processor_ids(self):\n return self._processor_ids", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def monitoredProcs(self):\n return self._pidToProcess.itervalues()", "def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids", "def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res", "def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs", "def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n 
line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None", "def get_vid_pid_list(self):\n\n return self.vid_pid_s", "def getChildPIDs(self):\n\t\treturn self.pids", "def get_pid_of_all_workers(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and (\"slave\" in i.name or \"master\" in i.name):\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_processes():\n yield from psutil.process_iter()", "def _getAllRunningInstances(self):\n return self._ec2.get_only_instances(filters={\n 'tag:leader_instance_id': self._instanceId,\n 'instance-state-name': 'running'})", "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def getSessionId(self) -> List[int]:\n return self.pool.getSessionId()", "def get_ceph_pids():\n pids = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n with open(cfg['pid_file'], 'r') as file_fd:\n pids.append((srv, int(file_fd.read())))\n return pids", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. 
If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def processes(self):\n return self._getint('processes')", "def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children", "def get_instances_ids(self):\n reservations = self.__get_reservations()\n instances_ids = []\n instances,_ = self.__get_multi_instances(reservations)\n for instance in instances:\n instances_ids.append(instance.id.encode(\"latin-1\"))\n return instances_ids", "def processes(self):\n nodes = (self.nodes.exclude(process__isnull=True)\n .values_list('process_id', flat=True))\n return Process.objects.filter(id__in=nodes).distinct()", "def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def job_ids(self):\n return self.connection.lrange(self.key, 0, -1)", "def get_ids(self):\n return self._ids", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def all_env_ids(self) -> np.ndarray:", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def procs_running():\n \n return __proc_stat('procs_running')", "def fetch_process_queries(self):\n url = \"/api/investigate/v1/orgs/{}/processes/search_jobs\".format(\n self.credentials.org_key\n )\n ids = self.get_object(url)\n return ids.get(\"query_ids\", [])", "def remote_get_ids(self):\n return self.smultiengine.get_ids()", "def job_ids(self):\n return self.get_job_ids()", "def get_current_server_pidfiles_and_ports():\r\n pidfile_dir = ReportingServerManager._get_pidfile_dir()\r\n # There should only be one pidfile, but there may be errors/race conditions where\r\n # there are multiple of them.\r\n pidfile_names = os.listdir(pidfile_dir) if os.path.exists(pidfile_dir) else []\r\n ret = []\r\n for pidfile_name in pidfile_names:\r\n m = re.match(r'port_(\\d+)\\.pid', pidfile_name)\r\n if m is not None:\r\n ret.append((os.path.join(pidfile_dir, pidfile_name), int(m.group(1))))\r\n return ret", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def ids(self):\n return list(self._id_generator())", "def ids(self):\n return list(self._id_generator())", "def get_coreids(self):\n return range(0, self.get_ncores()) # default behaviour for x86", "def running_nodes(self) -> Set[str]:\n return self._running_nodes.copy()", "def ids(self):\n return self._ids", "def get_unstopped_processes(self):\r\n return [ x for x in self.processes.values() if x.get_state() not in\r\n STOPPED_STATES ]", "def get_worker_id_list(self):\r\n return self._workers_id", "def waiting_procs(self):\n return [p.model_id for p in self.primary_scheduler.queue_nodes.wait_q]", "def getIDs(self):\n return self.multiengine.getIDs()", "def queue_job_ids(self):\n return list(self.queue.keys())", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def job_ids(self) -> List[str]:\n return self._db_data.job_ids", "def get_pids(extdir):\n\n pid_fnames = glob.glob(extdir + \"/*.pid\")\n\n pids = {}\n for fname in pid_fnames:\n try:\n # Get the pid\n with open(fname, \"r\") as fobj:\n pid = fobj.read().strip()\n pid = int(pid)\n\n # Check if process running\n os.kill(pid, 0)\n except (OSError, IOError, ValueError):\n continue\n\n service = os.path.basename(fname)\n service = 
service.split(\".\")[0]\n pids[service] = pid\n\n return pids", "def get_running_processes(self, dev_handler):\n # Get the list of running processes on each device\n running_processes = NvmlHandler.exec_nvml_function(nvmlDeviceGetComputeRunningProcesses,dev_handler)\n\n # Turns these process objects into dicts\n running_processes_dicts = [obj.__dict__ for obj in running_processes if obj]\n\n # Enhance these dicts with information from psutil\n new_dicts = []\n for running_processes_dict in running_processes_dicts:\n\n # Init the new dict with the current information\n more_ps_infos = {}\n more_ps_infos.update(running_processes_dict)\n\n # Rename the usedGpuMemory key, if any\n if 'usedGpuMemory' in more_ps_infos:\n more_ps_infos['gpu_memory_used'] = utils.psutil_parse_readable_bytes(\n more_ps_infos.get('usedGpuMemory')\n )\n del more_ps_infos['usedGpuMemory']\n\n # Try to retreive info about the process using psutil\n try:\n pid = running_processes_dict.get('pid')\n more_ps_infos.update(utils.psutil_snapshot_process(pid))\n except Exception as e:\n logger.warning('Cannot gather info from process {}'.format(pid))\n\n new_dicts.append(more_ps_infos)\n\n return new_dicts", "def get_processes(self):\n processes={}\n for (server_ip, server_port) in self.hosts:\n try:\n server = xmlrpclib.ServerProxy(\"http://%s:%d\"%(server_ip, server_port))\n uid = server.get_id()\n if uid != self.uid:\n processes[uid] = server\n except socket.error:\n pass\n return processes", "def existing_pipe_ids():\n ids_list = []\n if not os.path.exists(os.path.dirname(__file__) + LAST_RUN_FILE): # Check if record file exist\n pipe_id_file = open(os.path.dirname(__file__) + LAST_RUN_FILE, \"a+\") # if not then create\n else:\n pipe_id_file = open(os.path.dirname(__file__) + LAST_RUN_FILE, \"r+\") # else, start checking the list\n pipelines = []\n for existing_pipeline in pipe_id_file:\n pipelines = existing_pipeline.split(\",\")\n\n ids_list = [int(pipeline) for pipeline in pipelines]\n\n pipe_id_file.close()\n return ids_list", "def get_node_ids(self):\n \n return self.node_ids", "def get_pid_of_all_slaves(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and \"slave\" in i.name:\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_ids(self):\n return self._graphs.keys()", "def pool_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"pool_ids\")", "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def get_pids_filtered_by_regex(regex_list, excludes=None):\n excludes = excludes or []\n res = []\n for process in psutil.process_iter():\n try:\n cmdline = process.cmdline()\n except psutil.NoSuchProcess:\n cmdline = None\n except psutil.AccessDenied:\n cmdline = None\n if cmdline:\n name = \" \".join(cmdline)\n for r in regex_list:\n if name.strip() != \"\" and re.match(r, name):\n res.append(process.pid)\n return res", "def _get_instance_ids(instances):\n instance_ids = []\n for instance in instances:\n instance_ids.append(instance.id)\n return instance_ids", "def 
get_filtered_pids(filterstr, excludes=None):\n excludes = excludes or []\n cmd = \"ps ax | grep '%s'\" % filterstr\n rc, out, err = j.core.executors.run_local(cmd)\n # print out\n found = []\n\n def checkexclude(c, excludes):\n for item in excludes:\n c = c.lower()\n if c.find(item.lower()) != -1:\n return True\n return False\n\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != -1:\n line = line.strip()\n if not checkexclude(line, excludes):\n # print \"found pidline:%s\"%line\n found.append(int(line.split(\" \")[0]))\n return found", "def get_active_browser_ids(self):\n\n # This relies on some private data structures, but presently\n # there is no other way. There's been a discussion in the\n # robot slack channels about adding a new keyword that does\n # what this keyword does. When that happens, we can remove\n # this keyword.\n driver_ids = []\n try:\n driver_cache = self.selenium._drivers\n except NoOpenBrowser:\n return []\n\n for index, driver in enumerate(driver_cache._connections):\n if driver not in driver_cache._closed:\n # SeleniumLibrary driver ids start at one rather than zero\n driver_ids.append(index + 1)\n return driver_ids", "def get_server_job_ids(self):\n self.server_job_ids = list()\n for server in self.servers:\n if server != 'local':\n with SSHClient(server) as ssh:\n self.server_job_ids.extend(ssh.check_running_jobs_ids())\n else:\n self.server_job_ids.extend(check_running_jobs_ids())", "def list_instances(self):\n LOG.debug(\"list_instances\")\n\n instance_ids = []\n bmms = db.bmm_get_all(None)\n for bmm in bmms:\n if not bmm[\"instance_id\"]:\n continue\n instance_ids.append(self._instance_id_to_name(bmm[\"instance_id\"]))\n\n return instance_ids", "def getTaskIds(self, director):\n # the computation record\n computation = self._getComputationRecord(director)\n \n # search for tasks\n iworker = self.inventory.iworker\n tasks = computation.findTasks(director.clerk.db, iworker)\n\n ids = [t.id for t in tasks]\n return ','.join(ids)", "def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")", "async def running(self) -> list[dict[str, Any]]:\n data = await self.controller.request(\"get\", \"watering/program\")\n return cast(list[dict[str, Any]], data[\"programs\"])", "def mpi_procs(self):\n return self._mpi_procs", "def GetPublishedProcesses():\r\n pass", "def module_ids(self, rev=False):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)\n\t\tif rev:\n\t\t\treturn 
list(reversed(ids))\n\t\treturn ids", "def node_ids(self):\n return [self.node_id]", "def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))", "def numRunning(self):\n #with self.__queueLock:\n # The size of the list does not change, only its contents, so I don't\n # think there should be any conflict if we are reading a variable from\n # one thread and updating it on the other thread.\n activeRuns = sum(run is not None for run in self.__running)\n\n return activeRuns", "def cpu_ids() -> List[int]:\n api_file = open('/sys/devices/system/cpu/present', 'r')\n\n cpu_id_tmp = re.findall('\\d+|-', api_file.readline().strip())\n cpu_id_list = []\n for i in range(len(cpu_id_tmp)):\n if cpu_id_tmp[i] == '-':\n for cpu_id in range(int(cpu_id_tmp[i - 1]) + 1, int(cpu_id_tmp[i + 1])):\n cpu_id_list.append(int(cpu_id))\n else:\n cpu_id_list.append(int(cpu_id_tmp[i]))\n return cpu_id_list", "def get_ids(self) -> List[str]:" ]
[ "0.7804924", "0.7730347", "0.7697995", "0.7590113", "0.7520795", "0.7483522", "0.7411062", "0.7385953", "0.7380598", "0.73299384", "0.73251057", "0.7316895", "0.7233392", "0.7152342", "0.71504444", "0.71448386", "0.7116604", "0.7027047", "0.69536257", "0.6934355", "0.6899609", "0.6830717", "0.6713193", "0.667902", "0.66630995", "0.66236854", "0.66233766", "0.66061366", "0.6600933", "0.659585", "0.6586262", "0.6574124", "0.65595657", "0.65563655", "0.6531623", "0.65232", "0.65212804", "0.65184736", "0.64972836", "0.64866364", "0.64842385", "0.6482842", "0.6468857", "0.6460817", "0.6452791", "0.64400494", "0.6436799", "0.64306146", "0.6359324", "0.63521636", "0.63257146", "0.63247967", "0.63146406", "0.6308602", "0.6282787", "0.62817955", "0.6271527", "0.6269407", "0.62583417", "0.62309873", "0.6230233", "0.6230233", "0.6224052", "0.6211778", "0.62073547", "0.6196397", "0.61953366", "0.619023", "0.61851496", "0.6178037", "0.61747485", "0.61747485", "0.6162752", "0.61199796", "0.6116464", "0.6095303", "0.6061507", "0.60560244", "0.6052459", "0.60458785", "0.6040581", "0.6034488", "0.6031289", "0.60294086", "0.6029199", "0.60152084", "0.60027444", "0.5984857", "0.59838915", "0.59752554", "0.5953152", "0.59520984", "0.5949079", "0.5944383", "0.59327877", "0.5926497", "0.5910287", "0.5910187", "0.58923984", "0.58746004" ]
0.813076
0
Like get_matches_commandline method, given a string, match the processes on the name but also returns the matched processes' children
def get_matches_commandline_with_children(self, match_pattern): matched_pids = self.get_matches_commandline(match_pattern) for matched_pid in matched_pids: matched_pids.extend(self.get_child_processes(matched_pid)) return list(set(matched_pids))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_matches_commandline(self, match_pattern):\n\n matches = []\n for _process in self.processes:\n if re.search(match_pattern, _process[\"cmd\"]):\n matches.append(_process[\"pid\"])\n return matches", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def find(name, exact=False):\n processes = run(\"ps aux | grep {0}\".format(name))\n res = []\n for line in processes.split(\"\\n\"):\n if not line.strip():\n continue\n line = RE_SPACES.split(line, 10)\n # We skip lines that are not like we expect them (sometimes error\n # message creep up the output)\n if len(line) < 11:\n continue\n user, pid, cpu, mem, vsz, rss, tty, stat, start, time, command = line\n if (exact and command == name) \\\n or (not exact and command.startswith(name)):\n res.append(pid)\n return res", "def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")", "def get_similar_processes():\n myprocess = get_my_process()\n result = []\n for item in psutil.process_iter():\n try:\n if item.cmdline() == myprocess.cmdline():\n result.append(item)\n except psutil.NoSuchProcess:\n pass\n return result", "def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n 
click.echo(j.sal.process.getProcessPid(pid))", "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? 
yes - via race-condition in reading procfs\n\n return accepted", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids", "def get_pids_filtered_by_regex(regex_list, excludes=None):\n excludes = excludes or []\n res = []\n for process in psutil.process_iter():\n try:\n cmdline = process.cmdline()\n except psutil.NoSuchProcess:\n cmdline = None\n except psutil.AccessDenied:\n cmdline = None\n if cmdline:\n name = \" \".join(cmdline)\n for r in regex_list:\n if name.strip() != \"\" and re.match(r, name):\n res.append(process.pid)\n return res", "def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def get_child_processes(self, ppid):\n\n all_children = []\n children_to_explore = set()\n for _pid in self.parent_to_children_map[ppid]:\n all_children.append(_pid)\n children_to_explore.add(_pid)\n\n # get the children 'recursively'\n while children_to_explore: # the invariant\n child_to_explore = children_to_explore.pop()\n if not self.parent_to_children_map.get(child_to_explore):\n continue\n unvisited = self.parent_to_children_map[child_to_explore]\n for node in unvisited:\n if node not in all_children:\n children_to_explore.add(node)\n all_children.append(node)\n return list(set(all_children))", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def _complete_processes(self, text):\r\n processes = []\r\n for info in self._get_complete_info():\r\n if ':' in text or info['name'] != info['group']:\r\n processes.append('%s:%s' % (info['group'], info['name']))\r\n if '%s:*' % info['group'] not in processes:\r\n processes.append('%s:*' % info['group'])\r\n else:\r\n processes.append(info['name'])\r\n return [ p + ' ' for p in processes if p.startswith(text) ]", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n 
self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def findProcessIdByName(processName):\n listOfProcessObjects = []\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"create_time\"])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo[\"name\"].lower():\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return listOfProcessObjects", "def _psa(cmd, allmatching=True, paths=None):\n import psutil\n pids = list()\n cmdlines = list()\n procs = list()\n cmdline = ''\n bins = _whicha(cmd, paths)\n if not allmatching:\n bins = bins[:1]\n for pid in psutil.pids():\n try:\n proc = psutil.Process(pid)\n cmdline = proc.cmdline()\n if any([bin in cmdline for bin in bins]):\n cmdlines.append(cmdline)\n pids.append(pid)\n procs.append(proc)\n except psutil.ZombieProcess:\n pass\n except psutil.AccessDenied:\n pass\n return (pids, cmdlines, procs)", "def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def kill_process_by_name(re_pattern):\n\n user_name = os.getlogin()\n parent_pid = os.getppid()\n current_pid = os.getpid()\n\n stdin = subprocess.check_output([\"ps\", \"-u\", user_name])\n\n processes = []\n\n processes = [(int(re.match(\" *[0-9]+\", line).group()), line.split(' ')[-1]) for line in stdin.split('\\n')[1:-1]]\n\n for process in processes:\n\n if re.match(re_pattern, process[1]) and process[0] != current_pid:\n# print \"KILLING PID: \", process\n os.kill(process[0], signal.SIGKILL)", "def ProcessIterator(pids, process_regex_string, cmdline_regex_string,\n ignore_grr_process, error_list):\n pids = set(pids)\n if ignore_grr_process:\n grr_pid = psutil.Process().pid\n else:\n grr_pid = -1\n\n if process_regex_string:\n process_regex = re.compile(process_regex_string)\n else:\n process_regex = None\n\n if cmdline_regex_string:\n cmdline_regex = re.compile(cmdline_regex_string)\n else:\n cmdline_regex = None\n\n if pids:\n process_iterator = []\n for pid in pids:\n try:\n process_iterator.append(psutil.Process(pid=pid))\n except Exception as e: # pylint: disable=broad-except\n error_list.Append(\n rdf_memory.ProcessMemoryError(\n process=rdf_client.Process(pid=pid), error=str(e)))\n else:\n process_iterator = psutil.process_iter()\n\n for p in process_iterator:\n if process_regex and not 
process_regex.search(p.name()):\n continue\n\n if cmdline_regex and not cmdline_regex.search(\" \".join(p.cmdline())):\n continue\n\n if p.pid == grr_pid:\n continue\n\n yield p", "def _argsForSubprocess(self) -> list[str]:\n pass", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def GetWith(cls, expression, compare=(lambda a, b: fnmatch.fnmatch(a, b))):\n\t\tres = []\n\t\texpression = \"*\" + expression + \"*\"\n\t\tfor pid, cmdline in cls.List().items():\n\t\t\tif compare(cmdline, expression):\n\t\t\t\tres.append(pid)\n\t\treturn res", "def psa(line):\n from stefco.get_terminal_size import get_terminal_size\n import textwrap\n cmd, paths = _cmd_path_lex(line)\n pids, cmds, procs = _psa(cmd, allmatching=True, paths=paths)\n print(\"Matching processes:\\nPID\\tCOMMAND\\n\" + 80*\"~\" + \"\\n\\n\")\n procdict = dict()\n termwidth = get_terminal_size().columns\n for i, pid in enumerate(pids):\n procdict[pid] = procs[i]\n wrappedcmd = textwrap.wrap(str(cmds[i]), width=(termwidth - 8))\n # print pid on first line of command\n print(\"{}\\t{}\".format(pid, wrappedcmd.pop(0)))\n # print any remaining lines of the command\n if not len(wrappedcmd) == 0:\n print(\"\\t\" + \"\\n\\t\".join(wrappedcmd))\n # print an extra blank line after each process\n print(\"\")\n return procdict", "def matches(self, pid):\n if self._command_wildcards or self._command_regexs:\n # Matchers requiring comm file\n path = P.join(PROC_DIR, str(pid), 'comm')\n try:\n with open(path) as f:\n comm = f.read().rstrip()\n for pattern in self._command_wildcards:\n if fnmatch(comm, pattern):\n return True\n\n for re_obj in self._command_regexs:\n if 
re_obj.match(comm):\n return True\n except FileNotFoundError:\n # process may have exited before file could be read\n return False\n\n return False", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res", "def get_processes():\n yield from psutil.process_iter()", "def Children( cls, pid ):\n\t\tres = []\n\t\tpid = int(pid)\n\t\tfor cpid, cmd in cls.List().items():\n\t\t\tppid = int(cls.Status(cpid)[\"ppid\"])\n\t\t\tif ppid == pid:\n\t\t\t\tres.append( (cpid, None, cmd))\n\t\treturn res", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def check_processes(self, name: Optional[str] = None) -> str:\n\n for process in self.processes:\n if not process.is_running():\n self.processes.remove(process)\n continue\n\n cmdline = \" \".join(process.cmdline())\n port = re.findall(r\"--port=(\\d+)\", cmdline)\n port = port[0] if port else \"\"\n\n if re.findall(r\"-m\\s+.*streamlit_run|streamlit\", cmdline):\n return f\"http://localhost:{port}/{name}\"\n\n return \"\"", "def process_exists(name):\n for pid in [pid for pid in os.listdir(\"/proc\") if pid.isdigit()]:\n try:\n exe_name = os.readlink(os.path.join(\"/proc/\", pid, \"exe\"))\n except OSError:\n continue\n if exe_name and exe_name.endswith(os.path.join(\"/\", name)):\n return pid\n return None", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes", "def get_filtered_pids(filterstr, excludes=None):\n excludes = excludes or []\n cmd = \"ps ax | grep '%s'\" % filterstr\n rc, out, err = j.core.executors.run_local(cmd)\n # print out\n found = []\n\n def checkexclude(c, excludes):\n for item in excludes:\n c = c.lower()\n if c.find(item.lower()) != -1:\n return True\n return False\n\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != 
-1:\n line = line.strip()\n if not checkexclude(line, excludes):\n # print \"found pidline:%s\"%line\n found.append(int(line.split(\" \")[0]))\n return found", "def __init__(self, name, process_name, regexes):\n\n self.name = name\n self.process_name = process_name\n self.searches = [re.compile(regex).search for regex in regexes]\n self.count = 0", "def process(cmd_string, stdin=None):\n return process_results(process_run(cmd_string, stdin=stdin))", "def _find_running_exe(exe):\n candidates = []\n exe = path.abspath(exe)\n for proc in _get_process_list():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'exe'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"exe\"] and pinfo['exe'] == exe:\n candidates.append(pinfo['pid'])\n return candidates", "def getChildPIDs(self):\n\t\treturn self.pids", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def killall(name, params=None):\n\n if platform.system() == \"Windows\":\n name += \".exe\"\n\n for ps in psutil.process_iter():\n cmdline = \"\"\n try:\n if ps.name() != name:\n continue\n\n if params:\n cmdline = ps.cmdline()\n except psutil.AccessDenied:\n continue\n\n ps_found = True\n\n if params: # If you want to compare command line\n check_list = []\n\n # Data converting\n if params is list:\n check_list = params\n elif params is str:\n check_list = str.split(\",\")\n else:\n check_list.append(str(params))\n\n # Compare command line's parameters\n for item in check_list:\n ps_found = False\n\n for param in cmdline:\n if param.find(item) != -1:\n ps_found = True\n break\n\n if ps_found is False: # Process is not found.\n break\n\n if ps_found:\n try:\n ps.kill()\n except Exception:\n pass", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def containers():\n # TODO: can there be multiple names?\n cmd = [ 'docker', 'ps', '--format', '{{.Names}}' ]\n with popen_text(cmd) as docker:\n for ln in docker.stdout:\n yield ln[:-1]", "def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs", "def children(word, word_dict):\n res = []\n for i in range(len(word)):\n child = word[:i]+word[i+1:]\n if child in word_dict:\n res.append(child)\n return res", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def _cmdline(process):\n return \" \".join(process.cmdline())", "async def find_processes(self, msg):\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif 
proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)", "def match(self, name, args, useName=True, useMd5Digest=True):\n if self._config.name is None:\n return False\n\n # SNMP agents return a 'flexible' number of characters,\n # so exact matching isn't always reliable.\n processName = ('%s %s' % (name, args or '')).strip()\n\n # Make the comparison\n result = self._compiled_regex.search(processName) is not None\n\n # We can a match, but it might not be for us\n if result and useMd5Digest:\n # Compare this arg list against the digest of this proc\n digest = md5(args).hexdigest()\n if self.digest and digest != self.digest:\n result = False\n\n if result and useName:\n cleanNameOnly = globalPrepId(name)\n nameMatch = self._compiled_name_regex.search(cleanNameOnly)\n if not nameMatch or nameMatch.group(1) not in ('', '_'):\n log.debug(\"Discarding match based on name mismatch: %s %s\", \n cleanNameOnly, self._name_only)\n result = False\n\n return result", "def ps():\n for p in psutil.process_iter():\n try:\n pid = p.pid\n name = p.name()\n cmdline = p.cmdline()\n except psutil.AccessDenied:\n continue\n\n print(\"%5d %10s %s\" % (pid, name, cmdline))", "def get_process_list(config):\n # get list of processes\n process_list = getlist(config.getstr('config', 'PROCESS_LIST'))\n\n out_process_list = []\n # for each item remove dashes, underscores, and cast to lower-case\n for process in process_list:\n # if instance is specified, extract the text inside parenthesis\n match = re.match(r'(.*)\\((.*)\\)', process)\n if match:\n instance = match.group(2)\n process_name = match.group(1)\n else:\n instance = None\n process_name = process\n\n wrapper_name = get_wrapper_name(process_name)\n if wrapper_name is None:\n config.logger.warning(f\"PROCESS_LIST item {process_name} \"\n \"may be invalid.\")\n wrapper_name = process_name\n\n # if MakePlots is in process list, remove it because\n # it will be called directly from StatAnalysis\n if wrapper_name == 'MakePlots':\n continue\n\n out_process_list.append((wrapper_name, instance))\n\n return out_process_list", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def get_processes_list_within_container(self):\n items_list = []\n proc_item = []\n procs_dict = {}\n\n try:\n p = Popen(DOCKER_TOP_CMD.format(self.container_id), shell=True, stdout=PIPE, stderr=PIPE)\n stdout_dump, stderr_data = p.communicate()\n except Exception as e:\n log.debug('{}[*]{} {}'.format(DFbase.LOG_ERROR_COLOR,\n DFbase.LOG_INFO_COLOR,e))\n return False\n\n procs_lines = stdout_dump.decode('utf-8')\n procs_lines = procs_lines.split(\"\\n\")\n\n for procs_item in procs_lines:\n if 'USER' in procs_item:\n continue\n elif len(procs_item):\n proc_item.append(procs_item)\n\n for item in proc_item:\n x = item.split(None, 4)\n log.debug('{}[*]{} PID:{}, {}, {}, {}, {}'.format(DFbase.LOG_DEBUG_COLOR, \n 
DFbase.LOG_INFO_COLOR, x[0], x[1],x[2],x[3],x[4]))\n procs_dict['USER'] = x[0]\n procs_dict['PID'] = x[1]\n procs_dict['PPID'] = x[2]\n procs_dict['STIME'] = x[3]\n procs_dict['CMD'] = x[4]\n\n items_list.append(procs_dict.copy())\n\n procs_path = self.artifacts_path + '/' + 'top_command.json'\n with open(procs_path, 'w') as f:\n json.dump(items_list, f, indent=4)\n\n self.copy_executable(items_list)\n\n return True", "def _get_regex(self, ctx):\n _parts = []\n\n with ctx.processing(self):\n for _child in self.children:\n _parts.append(self._as_regex_obj(_child)._get_regex(ctx))\n return \"\".join(_parts)", "def run_matching(self):\n paradic = self.cfg['param']['paradic']\n print 'in run_matching() n_bins = ' +str(paradic['n_bins'])\n\n f = open(self.work_dir+'matches.txt','w')\n matching = self.run_proc(['match_cli', 'keys_im0.txt',\n 'keys_im1.txt',\n str(paradic['flag_match']),\n str(paradic['C_match']),\n str(paradic['n_hist']),\n str(paradic['n_ori']),\n str(paradic['n_bins'])],\n stdout=f)\n self.wait_proc(matching, timeout=self.timeout)\n return 1", "def filter_process(self, *args, **kwargs):\n if 'uuid' in kwargs:\n kwargs['uuid'] = Process.strip_uuid(kwargs['uuid'])\n\n kwargs = {'process__{}'.format(k): v for k, v in kwargs.items()}\n\n trees = (ProcessNode.objects.filter(*args, **kwargs)\n .order_by('tree_id')\n .values_list('tree_id', flat=True)\n .distinct())\n return self.filter(process_tree__tree_id__in=trees)", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def match_output(pattern, cmd):\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n\n return re.findall(pattern, subprocess.check_output(cmd))", "def renice(self,process_list,level):\n res = []\n pids = {}\n for process in process_list:\n if hasattr(process,'machine'):\n try:\n worker = self.worker_by_name[process.machine]\n except KeyError:\n worker = self.worker_by_name[process.long_machine]\n pid = process.pid\n else:\n worker = self.workers[process[0]]\n pid = process[1]\n try:\n pids[worker] = pids[worker] + ' ' + str(pid)\n except:\n pids[worker] = str(pid)\n for worker,value in pids.items():\n arg = 'renice %d -p %s' % (level,value)\n res.append(worker.apply(os.system,(arg,)))\n return res", "def findChildren(self, name):\n\n # Note: this returns a list of all the children of a given\n # name, irrespective of the depth of look-up.\n \n children = []\n \n for child in self.getAllChildren():\n if child.getName() == name:\n children.append(child)\n\n return children", "def collect_children(self):\n\t\twhile self.active_children:\n\t\t\tif len(self.active_children) < self.max_children:\n\t\t\t\toptions = os.WNOHANG\n\t\t\telse:\n\t\t\t\t# If the maximum number of children are already\n\t\t\t\t# running, block while waiting for a child to exit\n\t\t\t\toptions = 0\n\t\t\ttry:\n\t\t\t\tpid, status = os.waitpid(0, options)\n\t\t\texcept os.error:\n\t\t\t\tpid = None\n\t\t\tif not pid: break\n\t\t\tself.active_children.remove(pid)", "def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n (FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n output[right] = process[left]\n\n return 
output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}", "def command_groups(self, lines):\n for line in lines:\n match = command_regex.match(line)\n if match:\n if self.current_group:\n yield self.current_group\n groupdict = match.groupdict()\n comm = groupdict['comm']\n param = groupdict['param']\n if param:\n param = param[1:-1]\n data = groupdict['data']\n self.current_group = (comm, param, data)\n self.parse_multiline = True\n elif self.parse_multiline:\n match = cont_regex.match(line)\n if cont_regex.match(line):\n new_data, = match.groups()\n if new_data:\n name, param, data = self.current_group\n data += ' ' + new_data\n self.current_group = (name, param, data)\n else:\n self.parse_multiline = False\n else:\n self.parse_multiline = False\n if self.current_group:\n yield self.current_group\n self.current_group = []", "def matching(self, pids):\n for pid in pids:\n if self.matches(pid):\n yield pid", "def match(self):\n\n # We initate this variable which gonna contain the returned data\n result = []\n\n # We compile the regex string\n to_match = comp(self.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if self.rematch: # pylint: disable=no-member\n pre_result = to_match.findall(self.data)\n else:\n pre_result = to_match.search(self.data)\n\n if self.return_data and 
pre_result is not None: # pylint: disable=no-member\n if self.rematch: # pylint: disable=no-member\n for data in pre_result:\n if isinstance(data, tuple):\n result.extend(list(data))\n else:\n result.append(data)\n\n if self.group != 0: # pylint: disable=no-member\n return result[self.group] # pylint: disable=no-member\n else:\n result = pre_result.group(\n self.group # pylint: disable=no-member\n ).strip()\n\n return result\n elif (\n not self.return_data # pylint: disable=no-member\n and pre_result is not None\n ):\n return True\n return False", "def test_pick_a_process_to_run(self):\n workflow = self.get_workflow(\n \"\"\"file://C <- file://B\n echo C > C\n echo B creates C\n\nfile://B <- file://A\n echo B > B\n echo A creates B\n \"\"\")\n p = workflow.pick_a_process_to_run()\n assert p.id.find(\"_5\") >= 0, p.id", "def SearchRe(context, pattern, arg=None):\n if not arg:\n arg = context.node\n arg = Conversions.StringValue(arg)\n matches = re.findall(pattern, arg)\n proc = context.processor\n matches_nodeset = []\n for groups in matches:\n proc.pushResult()\n proc.writers[-1].startElement('Match', EMPTY_NAMESPACE)\n if type(groups) != type(()):\n groups = (groups,)\n for group in groups:\n proc.writers[-1].startElement('Group', EMPTY_NAMESPACE)\n proc.writers[-1].text(group)\n proc.writers[-1].endElement('Group')\n proc.writers[-1].endElement('Match')\n frag = proc.popResult()\n context.rtfs.append(frag)\n matches_nodeset.append(frag.childNodes[0])\n return matches_nodeset", "def get_connections(resolver = None, process_pid = None, process_name = None):\n\n if not resolver:\n available_resolvers = system_resolvers()\n\n if available_resolvers:\n resolver = available_resolvers[0]\n else:\n raise IOError('Unable to determine a connection resolver')\n\n if not process_pid and not process_name:\n raise ValueError('You must provide a pid or process name to provide connections for')\n\n def _log(msg):\n if LOG_CONNECTION_RESOLUTION:\n log.debug(msg)\n\n _log('=' * 80)\n _log('Querying connections for resolver: %s, pid: %s, name: %s' % (resolver, process_pid, process_name))\n\n if isinstance(process_pid, str):\n try:\n process_pid = int(process_pid)\n except ValueError:\n raise ValueError('Process pid was non-numeric: %s' % process_pid)\n\n if process_pid is None:\n all_pids = stem.util.system.pid_by_name(process_name, True)\n\n if len(all_pids) == 0:\n if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):\n raise IOError(\"Unable to determine the pid of '%s'. %s requires the pid to provide the connections.\" % (process_name, resolver))\n elif len(all_pids) == 1:\n process_pid = all_pids[0]\n else:\n if resolver in (Resolver.NETSTAT_WINDOWS, Resolver.PROC, Resolver.BSD_PROCSTAT):\n raise IOError(\"There's multiple processes named '%s'. 
%s requires a single pid to provide the connections.\" % (process_name, resolver))\n\n if resolver == Resolver.PROC:\n return stem.util.proc.connections(pid = process_pid)\n\n resolver_command = RESOLVER_COMMAND[resolver].format(pid = process_pid)\n\n try:\n results = stem.util.system.call(resolver_command)\n except OSError as exc:\n raise IOError(\"Unable to query '%s': %s\" % (resolver_command, exc))\n\n resolver_regex_str = RESOLVER_FILTER[resolver].format(\n protocol = '(?P<protocol>\\S+)',\n local = '(?P<local>[\\[\\]0-9a-f.:]+)',\n remote = '(?P<remote>[\\[\\]0-9a-f.:]+)',\n pid = process_pid if process_pid else '[0-9]*',\n name = process_name if process_name else '\\S*',\n )\n\n _log('Resolver regex: %s' % resolver_regex_str)\n _log('Resolver results:\\n%s' % '\\n'.join(results))\n\n connections = []\n resolver_regex = re.compile(resolver_regex_str)\n\n def _parse_address_str(addr_type, addr_str, line):\n addr, port = addr_str.rsplit(':', 1)\n\n if not is_valid_ipv4_address(addr) and not is_valid_ipv6_address(addr, allow_brackets = True):\n _log('Invalid %s address (%s): %s' % (addr_type, addr, line))\n return None, None\n elif not is_valid_port(port):\n _log('Invalid %s port (%s): %s' % (addr_type, port, line))\n return None, None\n else:\n _log('Valid %s:%s: %s' % (addr, port, line))\n return addr.lstrip('[').rstrip(']'), int(port)\n\n for line in results:\n match = resolver_regex.match(line)\n\n if match:\n attr = match.groupdict()\n\n local_addr, local_port = _parse_address_str('local', attr['local'], line)\n remote_addr, remote_port = _parse_address_str('remote', attr['remote'], line)\n\n if not (local_addr and local_port and remote_addr and remote_port):\n continue # missing or malformed field\n\n protocol = attr['protocol'].lower()\n\n if protocol == 'tcp6':\n protocol = 'tcp'\n\n if protocol not in ('tcp', 'udp'):\n _log('Unrecognized protocol (%s): %s' % (protocol, line))\n continue\n\n conn = Connection(local_addr, local_port, remote_addr, remote_port, protocol, is_valid_ipv6_address(local_addr))\n connections.append(conn)\n _log(str(conn))\n\n _log('%i connections found' % len(connections))\n\n if not connections:\n raise IOError('No results found using: %s' % resolver_command)\n\n return connections", "def search(query):\n notes_path = conf.ROOT\n note_path = None\n matches = defaultdict(list)\n\n try:\n # -S smart case\n # -C n n lines of before/after context\n # --ackmate more easily parseable format\n proc = subprocess.Popen(['ag', '-S', '-C 0', '--ackmate',\n '--ignore=*.pdf', query, notes_path],\n stdout=subprocess.PIPE)\n\n while True:\n byte_line = proc.stdout.readline()\n line = byte_line.decode('utf-8').strip()\n if not line and proc.poll() is not None:\n break\n\n # line == '--' separates results from the same file\n # line == '' separates different files\n elif line == '--' or not line:\n continue\n\n # filenames are preceded with ':'\n elif line[0] == ':':\n note_path = line[1:].replace(notes_path, '').strip('/')\n\n # parse the result lines\n else:\n match_info, match = byte_line.split(b':', 1)\n match_locations = []\n if b';' in match_info:\n line_num, match_locs = match_info.split(b';')\n for mloc in match_locs.split(b','):\n start, end = mloc.split(b' ')\n match_locations.append((int(start), int(end)))\n\n # match locations are for the byte string,\n # so don't decode the match\n matches[note_path].append((match, match_locations))\n return matches\n\n except FileNotFoundError:\n raise MissingDependencyException('The silver searcher (ag) is not 
installed')", "def get_command_line(pid, default=None):\n try:\n return only(\n process.Properties_(\"CommandLine\").Value\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')\n if process.Properties_(\"ProcessID\").Value == pid\n )\n except TooFewItemsError:\n return default", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def which(self, name):\n\n valid_names = [\n \"{0}.{1}\".format(name, ext).lower() if ext else \"{0}\".format(name).lower()\n for ext in KNOWN_EXTS\n ]\n finder = filter(operator.attrgetter(\"is_executable\"), self.children.values())\n name_getter = operator.attrgetter(\"path.name\")\n return next(\n (child for child in finder if name_getter(child).lower() in valid_names),\n None,\n )", "def get_pid(name, path=None):\n if name not in list_(limit=\"running\", path=path):\n raise CommandExecutionError(\n f\"Container {name} is not running, can't determine PID\"\n )\n info = __salt__[\"cmd.run\"](f\"lxc-info -n {name}\").split(\"\\n\")\n pid = [\n line.split(\":\")[1].strip()\n for line in info\n if re.match(r\"\\s*PID\", line) is not None\n ][0]\n return pid", "def getProcs(**options):\n procSeq = search.ProcSearch.byOptions(**options).procs\n return [Proc(p) for p in procSeq.procs]", "def _parse_args_files(self, filematch):\n files, start_pos = [], 0\n while True:\n pos_a = self.cmd.find(filematch, start_pos)\n if pos_a > 0:\n pos_b = self.cmd.find(' ', pos_a)\n if pos_b > 0:\n files.append(self.cmd[pos_a:pos_b])\n else:\n files.append(self.cmd[pos_a:])\n start_pos = pos_b\n else:\n return files", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END", "def _search_multiprocessing(self):\n pool = multiprocessing.Pool(self._main_args_.n_jobs)\n _cand_list = pool.map(self._search, self._main_args_._n_process_range)\n\n return _cand_list", "def processes():\n if not check_params(\n request.args.get(\"host\"), request.args.get(\"username\")\n ):\n abort(400)\n\n return get_processes(\n request.args.get(\"host\"),\n request.args.get(\"username\"),\n request.args.get(\"port\"),\n )", "def terminate_process_and_children(self, name):\n if name not in self.jobs:\n print(\"[%s] does not exist as a process!\", name)\n ppid = self.jobs[name]['process'].pid\n try:\n parent_proc = psutil.Process(ppid)\n except psutil.NoSuchProcess:\n return\n children = parent_proc.children(recursive=True)\n for proc in children:\n l.debug(proc)\n try:\n proc.send_signal(signal.SIGKILL)\n except:\n pass", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def run_plugins(optlist, args, instances):\n\n\t# XXX This algorithm for forking children and gatheing results is\n\t# primitive - would be better to gather the results from the children\n\t# as they become available - also should handle timeouts for 
children\n\t# that return slow - would be better to return a descriptive error\n\t# rather than just be listed as a timeout in the nagios interface\n\n\t# Look for the magic substitution tokens in the args\n\tinternal=None\n\ttry:\n\t\tinternal=args.index('_INTERNAL_')\n\texcept ValueError:\n\t\tpass\n\texternal=None\n\ttry:\n\t\texternal=args.index('_EXTERNAL_')\n\texcept ValueError:\n\t\tpass\n\t\n\t# Look for the DNS arguments in the args\n\tinc_int=None\n\texc_int=None\n\tinc_ext=None\n\texc_ext=None\n\n\tfor o, a in optlist:\n\t\ttry:\n\t\t\tif o=='-i':\n\t\t\t\tinc_int=socket.gethostbyname_ex(a)[2]\n\t\t\tif o=='-I':\n\t\t\t\texc_int=socket.gethostbyname_ex(a)[2]\n\t\t\tif o=='-e':\n\t\t\t\tinc_ext=socket.gethostbyname_ex(a)[2]\n\t\t\tif o=='-E':\n\t\t\t\texc_ext=socket.gethostbyname_ex(a)[2]\n\t\texcept socket.gaierror:\n\t\t\tUNKNOWN(\"hostname %s not known\"%(a))\n\t\t\n\t# Walk through the passed in list of instances, and start the plugins\n\tchildren={}\n\tips={}\n\tfor i in instances:\n\t\tif i['Status']!='Running':\n\t\t\t# Don't run tests against systems that aren't live\n\t\t\tcontinue\n\n\t\t# Implement rules for provided DNS args\n\t\tif inc_int and (i['InternalIP'] not in inc_int):\n\t\t\tcontinue\n\t\tif exc_int and (i['InternalIP'] in exc_int):\n\t\t\tcontinue\n\t\tif inc_ext and (i['ExternalIP'] not in inc_ext):\n\t\t\tcontinue\n\t\tif exc_ext and (i['ExternalIP'] in exc_ext):\n\t\t\tcontinue\n\t\t\n\t\targv=list(args) # make a copy\n\t\tif internal!=None:\n\t\t\targv[internal]=i['InternalIP']\n\t\tif external!=None:\n\t\t\targv[external]=i['ExternalIP']\n\t\t\t\n\t\tchildren[i['PlatformProperties']['InstanceID']]=run_plugin(argv)\n\t\tips[i['PlatformProperties']['InstanceID']]=(i['InternalIP'], i['ExternalIP'])\n\n\t# Gather the results from the forked children and summarise\n\tresults={}\n\n\tworst=0\n\tchecks=0\n\tfailures=0\n\tlast_failure=None\n\tsummary=[0, 0, 0, 0] # OK, WARN, CRIT, UNKNOWN\n\n\tfor child in children.keys():\n\t\tresults[child]=children[child].communicate()\n\t\tstatus=children[child].wait()\n\t\tif status > worst: # XXX - this will do for now, but UNKNOWN isn't as bad as CRITICAL\n\t\t\tworst=status\n\t\tif status:\n\t\t\tfailures += 1\n\t\t\tlast_failure=child\n\t\tchecks += 1\n\n\t\ttry:\n\t\t\tsummary[status]+=1\n\t\texcept IndexError:\n\t\t\tpass\n\n\tif checks == 0: # We didn't run any checks?!?!\n\t\tUNKNOWN(\"No hosts found to check!\")\n\n\tif (checks == 1) or (failures == 1): # pass through the one that matters\n\t\tif checks == 1:\n\t\t\tsysid=children.keys()[0]\n\t\t\tprint \"%s says %s\"%(ips[sysid][0], results[sysid][0].strip())\n\t\t\tsys.exit(worst)\n\t\tif worst in [1, 2, 3]:\n\t\t\tsysid=last_failure\n\t\t\tprint \"%s says %s\"%(ips[sysid][0], results[sysid][0].strip())\n\t\t\tsys.exit(worst)\n\t\t# If we made it here, a plugin returned a non standard error\n\t\tUNKNOWN(\"tests: %d, critical: %d, warning: %d, unknown: %d, ok: %d - in addition something returned %d\"%(summary[2], summary[1], summary[3], summary[0], worst))\n\n\tif failures == 0: # No failures - summarise as all OK\n\t\tsysid=children.keys()[0]\n\t\tOK(\"tests: %d, ok: %d - %s said %s \"%(checks, summary[0], ips[sysid][0], results[sysid][0].strip()))\n\n\t# If we made it here, there were multiple failures...\n\t# Build the summary\n\tmsg=[\"tests: %d\"%(checks)]\n\tif summary[2]:\n\t\tmsg+=[\"critical: %d\"%(summary[2])]\n\tif summary[1]:\n\t\tmsg+=[\"warning: %d\"%(summary[1])]\n\tif summary[3]:\n\t\tmsg+=[\"unknown: %d\"%(summary[3])]\n\tif 
summary[0]:\n\t\tmsg+=[\"ok: %d\"%(summary[0])]\n\n\t# convert the list into a comma seperated string\n\tmsg=\", \".join(msg)\n\n\t# Add the output of one of the failures\n\tmsg+=\" - %s said %s\"%(ips[last_failure][0], results[last_failure][0])\n\n\t# And exit with the summary message\n\tif worst == 1:\n\t\tWARN(msg)\n\tif worst == 2:\n\t\tCRIT(msg)\n\tif worst == 3:\n\t\tUNKNOWN(msg)", "def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result", "def get_pid(name):\n try: \n for process in psutil.process_iter():\n try:\n proc = process.as_dict(attrs=['pid', 'name'])\n if name in proc['name']:\n pid = proc['pid']\n logging.info(f\"Found PID {pid} for {name}\")\n return int(pid) \n except (psutil.NoSuchProcess, psutil.AccessDenied , psutil.ZombieProcess) :\n pass \n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def capture_process(command, process_name=None, closer=None, args=None, kwargs=None):\n verify_callable(command)\n verify_type(process_name, str, non_empty=True, allow_none=True)\n verify_callable(closer, allow_none=True)\n\n if args is None:\n args = ()\n if kwargs is None:\n kwargs = {}\n\n wmi = win32com.client.GetObject('winmgmts:')\n\n before = {\n process.Properties_(\"ProcessID\").Value: process\n for process in wmi.InstancesOf('Win32_Process')\n if (process_name is None or process.Properties_(\"Name\").Value == process_name)\n }\n\n result = command(*args, **kwargs)\n try:\n after = {\n process.Properties_(\"ProcessID\").Value: process\n for process in wmi.InstancesOf('Win32_Process')\n if process.Properties_(\"Name\").Value == process_name\n }\n\n new_pids = set(after) - set(before)\n\n return result, only(new_pids)\n except:\n if closer is not None:\n closer(result)\n raise", "def match(self, string):\n matched = False\n cmd = None\n\n if string in self.commands.keys():\n matched = True\n cmd = string\n\n else:\n for command in self.commands.keys():\n if \"regex\" in self.commands[command].keys() \\\n and re.match(self.commands[command][\"regex\"], string):\n matched = True\n cmd = command\n break\n \n if cmd and len(cmd) > 0:\n self._last_matched_command = cmd\n else:\n self._last_matched_command = None\n\n return matched", "def get_pid_of_all_workers(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and (\"slave\" in i.name or \"master\" in i.name):\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res" ]
[ "0.69588715", "0.63922125", "0.61809975", "0.6156374", "0.6085242", "0.602419", "0.59251225", "0.5850459", "0.58301437", "0.58174837", "0.58165544", "0.5804579", "0.58037686", "0.57153285", "0.56987065", "0.5696199", "0.5649648", "0.5642261", "0.55726624", "0.5563265", "0.55540144", "0.55440676", "0.5525824", "0.5525218", "0.55177826", "0.54925454", "0.53414667", "0.5338843", "0.53336465", "0.5264208", "0.5223302", "0.5193351", "0.5192593", "0.5188155", "0.5183287", "0.5170196", "0.5164418", "0.51156825", "0.51152295", "0.5103125", "0.5024564", "0.5000118", "0.49858323", "0.49701017", "0.49692336", "0.49570483", "0.4946295", "0.4935198", "0.4920492", "0.49178427", "0.4915472", "0.4913051", "0.49028626", "0.4864779", "0.4823408", "0.48183283", "0.47903362", "0.4759465", "0.47593012", "0.47531155", "0.47374514", "0.47342739", "0.47160468", "0.47148573", "0.47118375", "0.47062615", "0.4706245", "0.47036645", "0.46994022", "0.46979275", "0.46935913", "0.4691055", "0.4686414", "0.46834874", "0.46791938", "0.46753603", "0.4668797", "0.4667366", "0.46672156", "0.4656043", "0.4653143", "0.4651014", "0.46461707", "0.4645767", "0.46452993", "0.46435273", "0.46402875", "0.4637118", "0.46358404", "0.463412", "0.46298507", "0.46076456", "0.46005577", "0.4588498", "0.45795703", "0.45773685", "0.45742303", "0.45674187", "0.45666417", "0.45630282" ]
0.75858635
0
For a process, record the metrics in a historical metrics collector Collects the historical result of each metric per process in __metrics_history
def record_metrics(self, pid, metrics): for _metric, _metric_value in metrics.items(): if not self.__metrics_history[pid].get(_metric): self.__metrics_history[pid][_metric] = [] self.__metrics_history[pid][_metric].append(_metric_value) # only keep the last 2 running history for any metric self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][ -2: ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def addMonitoring(process):\n import FWCore.ParameterSet.Config as cms\n \n process.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n jobReportOutputOnly = cms.untracked.bool(True)\n )\n process.Timing = cms.Service(\"Timing\",\n summaryOnly = cms.untracked.bool(True)\n )\n \n return process", "def generate_history(self):\n self.reporter.generate()", "def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode 
(seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)", "def processStats(self):\n return self._processes.itervalues()", "def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def compute_metrics(self):\n pass", "def accumulateSubgridMassHistory(self,q):\n pass", "def get_historical_route_metrics():\n\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n return _get_historical_route_metrics(session)", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def metrics(self, metrics):\n\n self._metrics = metrics", "def test_get_derived_metric_history(self):\n pass", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def _measure(\n self, process: multiprocessing.Process\n ) -> Tuple[float, List[ResourceStats], bool]:\n started_time = time.time()\n is_killed = False\n proc_info = psutil.Process(process.pid)\n stats = []\n\n with timeit_context() as timeit:\n while process.is_alive():\n if time.time() - started_time > self.timeout:\n is_killed = True\n break\n stats.append(self._get_stats_record(proc_info))\n\n time.sleep(self.period)\n\n if is_killed:\n process.terminate()\n\n process.join()\n time_usage = timeit.time_passed\n\n return time_usage, stats, is_killed", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. 
Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)", "def get_metric_history(self, metric):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/managedfolders/%s/metrics/history\" % (self.project_key, self.odb_id),\n params={'metricLookup' : metric if isinstance(metric, str) or isinstance(metric, unicode) else json.dumps(metric)})", "def save_to_history(self):\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n self.history[stat_type].append(stat)\n self.init_stat()", "def updateProcess(self, machine, process):\n\n stamp = time.time() - self.initTime\n if machine in self.activity.keys():\n if ((\"processes\" in self.activity[machine].keys()) and \n (process in self.activity[machine][\"processes\"].keys())):\n self.activity[machine][\"processes\"][process].append(stamp)\n else:\n self.activity[machine][\"processes\"] = {process : [stamp]}\n else:\n self.activity[machine] = {\"filtered activity\" : [],\n \"raw activity\" : [],\n \"time\" : [],\n \"processes\" : {process : [stamp]}}", "def _get_stats_record(proc_info: psutil.Process) -> ResourceStats:\n return ResourceStats(\n time.time(),\n proc_info.cpu_percent(),\n memory_profiler.memory_usage(proc_info.pid, max_usage=True),\n )", "def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)", "def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()", "def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)", "def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in 
metrics:\n self.record_apdex_metric(metric)", "def appendProcessingHistoryItem(context, item):\n projectDir = context.projectDir\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps'])\n except KeyError:\n idx = 0\n idx += 1\n \n idxStr = str(idx)\n key = GenericMetadata.HISTORY_PROTO + idxStr\n GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr])", "def metrics(self) -> list:\n my_metrics = [\n FramesMetric(\"frames\"),\n FPSMetric(\"fps\"),\n EpisodeRewardMetric('PMM:episode_rewards'),\n EpisodeRewardMetricQuantile('P09:episode_rewards', quantile=0.9),\n EpisodeRewardMetricQuantile('P01:episode_rewards', quantile=0.1),\n EpisodeLengthMetric(\"episode_length\")\n ]\n\n return my_metrics + self.algo.metrics() + self.env_roller.metrics()", "def metrics(_):\r\n collector = BuildsCollector()\r\n build_metrics, headers = collector.get_metrics_table()\r\n print(tabulate(build_metrics, headers=headers))", "def GenerateHistoricalMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def setPerfMetrics(self, perf_metrics):\n for event in perf_metrics.metric:\n attr_name = '%s_%s_%s' % (frontendConfig.glidein_perfmetric_prefix,\n perf_metrics.name, event)\n self.adParams[attr_name] = perf_metrics.event_lifetime(event)", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def getProcessingHistoryList(context):\n projectDir = context.projectDir\n steps = []\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps']) + 1\n for i in xrange(1, idx):\n key = GenericMetadata.HISTORY_PROTO + str(i)\n steps.append(history[key])\n except KeyError:\n pass\n \n return steps", "def report(config, path, metrics, n, include_message=False):\n logger.debug(\"Running report command\")\n logger.info(f\"-----------History for {metrics}------------\")\n\n data = []\n metric_metas = []\n\n for metric in metrics:\n operator, key = metric.split(\".\")\n metric = resolve_metric(metric)\n # Set the delta colors depending on the metric type\n if metric.measure == MetricType.AimHigh:\n good_color = 32\n bad_color = 31\n elif metric.measure == MetricType.AimLow:\n good_color = 31\n bad_color = 32\n elif metric.measure == MetricType.Informational:\n good_color = 33\n bad_color = 33\n metric_meta = {\n \"key\": key,\n \"operator\": operator,\n \"good_color\": good_color,\n \"bad_color\": bad_color,\n \"title\": metric.description,\n \"type\": metric.type,\n }\n metric_metas.append(metric_meta)\n\n state = State(config)\n for archiver in state.archivers:\n # We have to do it backwards to get the deltas between releases\n history = state.index[archiver].revisions[:n][::-1]\n last = {}\n for rev in history:\n vals = []\n for meta in metric_metas:\n try:\n logger.debug(\n f\"Fetching metric {meta['key']} for {meta['operator']} in {path}\"\n )\n val = rev.get(config, archiver, meta[\"operator\"], path, meta[\"key\"])\n\n last_val = last.get(meta[\"key\"], None)\n # Measure the difference 
between this value and the last\n if meta[\"type\"] in (int, float):\n if last_val:\n delta = val - last_val\n else:\n delta = 0\n last[meta[\"key\"]] = val\n else:\n # TODO : Measure ranking increases/decreases for str types?\n delta = 0\n\n if delta == 0:\n delta_col = delta\n elif delta < 0:\n delta_col = f\"\\u001b[{meta['good_color']}m{delta:n}\\u001b[0m\"\n else:\n delta_col = f\"\\u001b[{meta['bad_color']}m+{delta:n}\\u001b[0m\"\n\n if meta[\"type\"] in (int, float):\n k = f\"{val:n} ({delta_col})\"\n else:\n k = f\"{val}\"\n except KeyError as e:\n k = f\"Not found {e}\"\n vals.append(k)\n if include_message:\n data.append(\n (\n format_revision(rev.revision.key),\n rev.revision.message[:MAX_MESSAGE_WIDTH],\n rev.revision.author_name,\n format_date(rev.revision.date),\n *vals,\n )\n )\n else:\n data.append(\n (\n format_revision(rev.revision.key),\n rev.revision.author_name,\n format_date(rev.revision.date),\n *vals,\n )\n )\n descriptions = [meta[\"title\"] for meta in metric_metas]\n if include_message:\n headers = (\"Revision\", \"Message\", \"Author\", \"Date\", *descriptions)\n else:\n headers = (\"Revision\", \"Author\", \"Date\", *descriptions)\n print(\n # But it still makes more sense to show the newest at the top, so reverse again\n tabulate.tabulate(\n headers=headers, tabular_data=data[::-1], tablefmt=DEFAULT_GRID_STYLE\n )\n )", "def calculate_times(log):\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] -\n events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] -\n events[i-1]['end_timestamp']).total_seconds()\n events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)", "def audit_remediation_history(self, query=None):\n return self.select(RunHistory).where(query)", "def metrics(self):\r\n if not hasattr(self, '_observable_metrics'):\r\n self._observable_metrics = Metrics()\r\n return self._observable_metrics", "def output(history_lines, results): # pylint: disable=too-many-locals,too-many-branches\n real_merges = {\n 1: Sampler(),\n 14: Sampler(14*60*24),\n }\n active_merges = {\n 1: Sampler(),\n 14: Sampler(14*60*24),\n }\n happy_moments = {d: Sampler(d*60*24) for d in results.happiness}\n\n tick = None\n last_merge = 0 # Number of merges last sample, resets on queue restart\n start_blocked = None\n start_offline = None\n\n for line in history_lines:\n try:\n tick, online, pulls, queue, dummy, blocked, merged = parse_line(\n *line.strip().split(' '))\n except TypeError: # line does not fit expected criteria\n continue\n if tick < datetime.datetime.now() - datetime.timedelta(days=DAYS+14):\n continue\n if not pulls and not queue and not merged: # Bad sample\n continue\n\n if merged >= last_merge:\n did_merge = merged - last_merge\n elif online: # Restarts reset the number to 0\n did_merge = merged\n else:\n did_merge = 0\n\n last_merge = merged\n for moments in happy_moments.values():\n moments.append(int(bool(online and not blocked)))\n\n for val in 
real_merges.values():\n val += did_merge\n if queue or did_merge:\n for val in active_merges.values():\n val += did_merge\n\n if not start_offline and not online:\n start_offline = tick\n if start_offline and online:\n results.offline_intervals.append((start_offline, tick))\n start_offline = None\n\n if not online: # Skip offline entries\n continue\n\n results.append(\n tick, did_merge, pulls, queue, real_merges, active_merges, happy_moments)\n\n if not start_blocked and blocked:\n start_blocked = tick\n if start_blocked and not blocked:\n results.blocked_intervals.append((start_blocked, tick))\n start_blocked = None\n if tick and not online:\n tick = datetime.datetime.utcnow()\n results.append(\n tick, 0, pulls, queue, real_merges, active_merges, happy_moments)\n if start_blocked:\n results.blocked_intervals.append((start_blocked, tick))\n if start_offline:\n results.offline_intervals.append((start_offline, tick))\n return results", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)", "def test_instant_process_statistics(self):\n import os\n from supvisors.statistics import instant_process_statistics\n stats = instant_process_statistics(os.getpid())\n # test that a pair is returned with values in [0;100]\n self.assertEqual(2, len(stats))\n # test cpu value\n self.assertIs(float, type(stats[0]))\n self.assertGreaterEqual(stats[0], 0)\n self.assertLessEqual(stats[0], 100)\n # test mem value\n self.assertIs(float, type(stats[1]))\n self.assertGreaterEqual(stats[1], 0)\n self.assertLessEqual(stats[1], 
100)", "def metrics_group():", "def print_metrics(result):\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, ' KEY METRICS: ')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '------------------------------------------------')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* pages_count: %d',\n get_counter_metric(result, 'pages_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_count: %d',\n get_counter_metric(result, 'revisions_count'))\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* very_long_page_histories_count: %d',\n get_counter_metric(result, 'very_long_page_histories_count'))\n revisions_per_page_distr = get_distributions_metric(\n result, 'revisions_per_page_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.mean: %d',\n revisions_per_page_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* revisions_per_page_distr.sum: %d',\n revisions_per_page_distr.sum)\n cumulative_page_rev_size_distr = get_distributions_metric(\n result, 'cumulative_page_rev_size_distr')\n logging.log(LOG_LEVEL_OUTPUT_INFO,\n '* cumulative_page_rev_size_distr.mean: %d',\n cumulative_page_rev_size_distr.mean)\n logging.log(LOG_LEVEL_OUTPUT_INFO, '* cumulative_page_rev_size_distr.sum: %d',\n cumulative_page_rev_size_distr.sum)", "def list_metrics(self):\n pass", "def _storePerfStats(self, results):\n self.state = ZenProcessTask.STATE_STORE_PERF\n byConf = reverseDict(self._deviceStats._pidToProcess)\n for procStat, pids in byConf.iteritems():\n if len(pids) != 1:\n log.debug(\"There are %d pids by the name %s - %s\",\n len(pids), procStat._config.name, procStat._config.originalName)\n procName = procStat._config.name\n for pid in pids:\n if not AS400PLUG in self._device.zCollectorPlugins:\n cpu = results.get(CPU + str(pid), None)\n else:\n cpu = results.get(AS400CPU + str(pid), None) / 10 ## as we get millis vs centis\n mem = results.get(MEM + str(pid), None)\n procStat.updateCpu(pid, cpu)\n procStat.updateMemory(pid, mem)\n self._save(procName, 'cpu_cpu', procStat.getCpu(),\n 'DERIVE', min=0)\n self._save(procName, 'mem_mem',\n procStat.getMemory() * 1024, 'GAUGE')\n return results", "def history(self, chrom):\n return self._hist[chrom]", "def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []", "def create_system_metrics(system):\n pass", "def calculate_batch_metrics(self):\n pass", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in 
temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def performance_stats(self):\n current_status = psutil.STATUS_DEAD\n try:\n current_status = self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n self.process_manager.handle_status_change(self.process_index, round(self.ioloop.time(), 2), current_status)\n\n if current_status != psutil.STATUS_DEAD:\n self.ioloop.call_later(0.5, self.performance_stats)", "def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def collect_log_output(activity_log, result):\n\n test_name = activity_log.get('identifier')\n if test_name:\n result.append(test_name['_value'])\n\n duration = activity_log.get('duration')\n if duration:\n output = str(\"{:.2f}\".format(float(duration['_value'])))\n result.append(output)\n\n performance_metrics = activity_log.get('performanceMetrics')\n if not performance_metrics is None:\n metrics = performance_metrics.get('_values')\n for metric in metrics:\n measurement = metric.get('measurements')\n values = measurement.get('_values')\n value_sum = 0\n for value in values:\n value_sum += float(value.get('_value'))\n output = str(value_sum / len(values))\n result.append(output)", "def __init__(self,\n name: str = 'Process',\n time_horizon: int = 432000,\n lambda_: float = 0.1,\n beta: float = 0.2,\n epsilon: float = 0.2,\n mu: int = 4,\n stream_speed: int = 100,\n n_features: int = 2,\n gen_plot: bool = False,\n gen_metrics: bool = True):\n self.gen_plot = gen_plot\n self.gen_metrics = gen_metrics\n self.event_index = 0\n self.total_cases = set()\n self.check_point = datetime(2010, 1, 1)\n self.cases = []\n self.name = name\n self.time_horizon = time_horizon\n self.initialized = False\n self.cp_count = 0\n self.nyquist = 0\n self.check_point_cases = 0\n self.process_model_graph = nx.DiGraph()\n self.denstream = DenStream(lambda_, beta, epsilon, mu, stream_speed, n_features)\n self.cluster_metrics = []\n self.case_metrics = []\n self.active_core_clusters = set()\n self.drift_indexes = []\n self.metrics = Metrics(self.name)\n self.feature_space_plot_path = f'visualization/{self.name}_feature_space'\n makedirs(self.feature_space_plot_path, 
exist_ok=True)", "def _monitor_metrics(self):\n metrics = [\"loss\"]\n try:\n m = U.metrics_from_model(self.model)\n if isinstance(m, list):\n metrics.extend(m)\n except:\n pass\n if self.val_data is not None:\n for m in metrics[:]:\n metrics.append(\"val_%s\" % (m))\n return metrics", "def add_metrics(self,\n metrics_: Optional[Dict[str, Any]] = None,\n add_to_child_: bool = True,\n **kwargs: Any) -> None:\n if self._child_stack and add_to_child_:\n self._child_stack[-1].add_metrics(metrics_, **kwargs)\n else:\n def collect(target: Dict[str, Any]):\n if metrics_:\n for key, val in metrics_.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n if kwargs:\n for key, val in kwargs.items():\n key = stage_type.add_metric_prefix(key)\n target[key] = to_number_or_numpy(val)\n\n stage_type = self._stage.type\n if self._stage.batch.is_active:\n collect(self._batch_metrics)\n elif self._stage.epoch is not None and self._stage.epoch.is_active:\n collect(self._epoch_metrics)\n else:\n collect(self._stage_metrics)\n self._stage.push_metrics(self._stage_metrics)", "def sweep_metrics(db, redis):\n with db.scoped_session() as session:\n for key_byte in redis.scan_iter(f\"{metrics_prefix}:*\"):\n key = key_byte.decode(\"utf-8\")\n try:\n parsed_key = parse_metrics_key(key)\n\n if parsed_key is None:\n raise KeyError(f\"index_metrics.py | Unable to parse key {key} | Skipping process key\")\n source, ip, key_date = parsed_key\n\n current_date_time = get_rounded_date_time()\n\n if key_date < current_date_time:\n if source == metrics_routes:\n process_route_keys(session, redis, key, ip, key_date)\n elif source == metrics_application:\n process_app_name_keys(session, redis, key, ip, key_date)\n except KeyError as e:\n logger.warning(e)\n redis.delete(key)\n except Exception as e:\n logger.error(e)\n redis.delete(key)", "def _process(self, activity):", "def plot_metrics(history):\n\n pyplot.plot(history.history['loss'], label='loss')\n\n pyplot.plot(history.history['val_loss'], label='val_loss')\n\n pyplot.legend()\n\n pyplot.show()", "def collect_metrics(application):\n\n try:\n subprocess.check_call(['juju', 'collect-metrics', application])\n except subprocess.CalledProcessError as e:\n raise Exception(\"Unable to collect metrics: {}\".format(e))", "def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()", "def info(self, handle):\n\n # Each process group gathers their output\n\n groupstr = \"\"\n procstr = \"\"\n\n gcomm = self._comm.comm_group\n wcomm = self._comm.comm_world\n rcomm = self._comm.comm_rank\n\n if wcomm.rank == 0:\n handle.write(\"Data distributed over {} processes in {} groups\\n\".format(self._comm.world_size, self._comm.ngroups))\n\n for ob in self.obs:\n id = ob['id']\n tod = ob['tod']\n base = ob['baselines']\n nse = ob['noise']\n intrvl = ob['intervals']\n\n if gcomm.rank == 0:\n groupstr = \"observation {}:\\n\".format(id)\n groupstr = \"{} {} total samples, {} detectors\\n\".format(groupstr, tod.total_samples, len(tod.detectors))\n if intrvl is not None:\n groupstr = \"{} {} 
intervals:\\n\".format(groupstr, len(intrvl))\n for it in intrvl:\n groupstr = \"{} {} --> {} ({} --> {})\\n\".format(groupstr, it.first, it.last, it.start, it.stop)\n\n # rank zero of the group will print general information,\n # and each process will get its statistics.\n\n nsamp = tod.local_samples[1]\n dets = tod.local_dets\n\n procstr = \" proc {}\\n\".format(gcomm.rank)\n my_chunks = 1\n if tod.local_chunks is not None:\n my_chunks = tod.local_chunks[1]\n procstr = \"{} sample range {} --> {} in {} chunks:\\n\".format(procstr, tod.local_samples[0], (tod.local_samples[0] + nsamp - 1), my_chunks)\n \n if tod.local_chunks is not None:\n chkoff = tod.local_samples[0]\n for chk in range(tod.local_chunks[1]):\n abschk = tod.local_chunks[0] + chk\n chkstart = chkoff\n chkstop = chkstart + tod.total_chunks[abschk] - 1\n procstr = \"{} {} --> {}\\n\".format(procstr, chkstart, chkstop)\n chkoff += tod.total_chunks[abschk]\n\n if nsamp > 0:\n \n stamps = tod.read_times(local_start=0, n=nsamp)\n\n procstr = \"{} timestamps {} --> {}\\n\".format(procstr, stamps[0], stamps[-1])\n\n for dt in dets:\n procstr = \"{} det {}:\\n\".format(procstr, dt)\n\n pdata = tod.read_pntg(detector=dt, local_start=0, n=nsamp)\n\n procstr = \"{} pntg [{:.3e} {:.3e} {:.3e} {:.3e}] --> [{:.3e} {:.3e} {:.3e} {:.3e}]\\n\".format(procstr, pdata[0,0], pdata[0,1], pdata[0,2], pdata[0,3], pdata[-1,0], pdata[-1,1], pdata[-1,2], pdata[-1,3])\n\n data = tod.read(detector=dt, local_start=0, n=nsamp)\n flags, common = tod.read_flags(detector=dt, local_start=0, n=nsamp)\n procstr = \"{} {:.3e} ({}) --> {:.3e} ({})\\n\".format(procstr, data[0], flags[0], data[-1], flags[-1])\n good = np.where((flags | common) == 0)[0]\n procstr = \"{} {} good samples\\n\".format(procstr, len(good))\n min = np.min(data[good])\n max = np.max(data[good])\n mean = np.mean(data[good])\n rms = np.std(data[good])\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n for cname in tod.cache.keys():\n procstr = \"{} cache {}:\\n\".format(procstr, cname)\n ref = tod.cache.reference(cname)\n min = np.min(ref)\n max = np.max(ref)\n mean = np.mean(ref)\n rms = np.std(ref)\n procstr = \"{} min = {:.4e}, max = {:.4e}, mean = {:.4e}, rms = {:.4e}\\n\".format(procstr, min, max, mean, rms)\n\n recvstr = \"\"\n if gcomm.rank == 0:\n groupstr = \"{}{}\".format(groupstr, procstr)\n for p in range(1, gcomm.size):\n if gcomm.rank == 0:\n recvstr = gcomm.recv(source=p, tag=p)\n groupstr = \"{}{}\".format(groupstr, recvstr)\n elif p == gcomm.rank:\n gcomm.send(procstr, dest=0, tag=p)\n gcomm.barrier()\n\n # the world rank 0 process collects output from all groups and\n # writes to the handle\n\n recvgrp = \"\"\n if wcomm.rank == 0:\n handle.write(groupstr)\n for g in range(1, self._comm.ngroups):\n if wcomm.rank == 0:\n recvgrp = rcomm.recv(source=g, tag=g)\n handle.write(recvgrp)\n elif g == self._comm.group:\n if gcomm.rank == 0:\n rcomm.send(groupstr, dest=0, tag=g)\n wcomm.barrier()\n\n return", "def prometheus_metrics(request):\n if not settings.DEBUG:\n return HttpResponseNotFound()\n\n # DEPRECATED: prometheus_multiproc_dir has been replaced by PROMETHEUS_MULTIPROC_DIR\n if \"PROMETHEUS_MULTIPROC_DIR\" in os.environ or \"prometheus_multiproc_dir\" in os.environ:\n registry = prometheus_client.CollectorRegistry()\n multiprocess.MultiProcessCollector(registry)\n else:\n registry = prometheus_client.REGISTRY\n metrics_page = prometheus_client.generate_latest(registry)\n return HttpResponse(\n 
metrics_page, content_type=prometheus_client.CONTENT_TYPE_LATEST\n )", "def run(self):\r\n history = self.extracter.load_user_history()\r\n self.plot_history(history)\r\n \r\n pp_history = self.analyser.preprocess_history(history)\r\n part_worths, attribute_importance, relative_importance = self.analyser.conjoint_analysis(pp_history)\r\n self.plot_analysis(part_worths, relative_importance)\r\n \r\n return history, pp_history, part_worths, relative_importance", "def compute_metrics(self, results: list) -> dict:", "def metrics(env):\n envs = environments()\n check_env(env, envs)\n\n metrics = get_or_abort(puppetdb._query, 'mbean')\n return render_template('metrics.html',\n metrics=sorted(metrics.keys()),\n envs=envs,\n current_env=env)", "def record_time_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_time_metric(metric)", "def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)", "def metrics(self):\n return self._metrics", "def metrics(self):\n return self._metrics", "def activity_process(self, activity_process):\n\n self._activity_process = activity_process", "def __add_to_hist(self):\n pieces, _ = self.get_pieces()\n self.hist.append([pieces, self.current_dice, self.current_player, self.round])", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def appendProcess(self, pid: int, numberOfVariables: int, processTable, diagnostics) -> int:\n self.memory[pid] = []\n\n for _i in range(numberOfVariables):\n self.memory[pid].append(MemoryItem())\n\n return 0", "def __get_metrics_list(self):\n metrics = metrics_calculator.MetricsCalculator(self.processor)\n metric_list = []\n # Populate the list\n for key in metrics.get_raw_metrics().keys():\n name = metrics.get_raw_metrics()[key][\"NAME\"]\n formula = metrics.get_raw_metrics()[key][\"FORMULA\"]\n description = metrics.get_raw_metrics()[key][\"DESCRIPTION\"]\n metric = Metric(name, formula, description)\n metric_list.append(metric)\n return metric_list", "def metrics(self):\n return self.__metrics", "def generate_latest(registry=Registry):\n\n def sample_line(line, metric_type):\n if line.labels:\n labelstr = '{{{0}}}'.format(','.join(\n ['{0}=\"{1}\"'.format(\n k, v.replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace('\"', r'\\\"'))\n for k, v in sorted(line.labels.items())]))\n else:\n labelstr = ''\n timestamp = ''\n if line.timestamp is not None:\n # Convert to milliseconds.\n timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000))\n name = line.name\n if metric_type == 'counter' and name.endswith('_total'):\n name = name[:-6]\n return '{0}{1} {2}{3}\\n'.format(\n name, labelstr, int(line.value), timestamp)\n\n output = []\n for metric in registry.collect():\n try:\n mname = metric.name\n mtype = metric.type\n # Munging from OpenMetrics into Prometheus format.\n if mtype == 'counter':\n mname = mname\n elif mtype == 'info':\n mname = mname + '_info'\n mtype = 'gauge'\n elif mtype == 'stateset':\n mtype = 'gauge'\n elif mtype == 'gaugehistogram':\n # A gauge histogram is really a gauge,\n # but this captures the structure better.\n mtype = 'histogram'\n elif mtype == 'unknown':\n mtype = 'untyped'\n help_str = '# HELP {0} {1}\\n'.format(mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n'))\n if 'Multiprocess' 
not in help_str:\n continue\n output.append('# HELP {0} {1}\\n'.format(\n mname, metric.documentation.replace('\\\\', r'\\\\').replace('\\n', r'\\n')))\n output.append('# TYPE {0} {1}\\n'.format(mname, mtype))\n\n for s in metric.samples:\n for suffix in ['_created', '_gsum', '_gcount']:\n if s.name == metric.name + suffix:\n break\n else:\n line = sample_line(s, mtype)\n if not line:\n continue\n output.append(line)\n except Exception as exception:\n exception.args = (exception.args or ('',)) + (metric,)\n raise\n\n return ''.join(output).encode('utf-8')", "def on_episode_end(self, episode, logs):\n duration = timeit.default_timer() - self.episode_start[episode]\n episode_steps = len(self.observations[episode])\n\n metrics = np.array(self.metrics[episode])\n metrics_dict = {}\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n for idx, name in enumerate(self.metrics_names):\n try:\n metrics_dict[name] = np.nanmean(metrics[:, idx])\n except Warning:\n metrics_dict[name] = float('nan')\n\n wandb.log(\n {\n 'step': self.step,\n 'episode': episode + 1,\n # 'duration': duration,\n 'episode_steps': episode_steps,\n # 'sps': float(episode_steps) / duration,\n 'episode_reward_sum': np.sum(self.rewards[episode]),\n 'reward_mean': np.mean(self.rewards[episode]),\n 'reward_min': np.min(self.rewards[episode]),\n 'reward_max': np.max(self.rewards[episode]),\n 'action_mean': np.mean(self.actions[episode]),\n # 'action_min': np.min(self.actions[episode]),\n # 'action_max': np.max(self.actions[episode]),\n # 'obs_mean': np.mean(self.observations[episode]),\n # 'obs_min': np.min(self.observations[episode]),\n # 'obs_max': np.max(self.observations[episode]),\n 'steps': logs['steps'],\n 'target_reached': logs['target_reached'],\n 'target_reached_in_steps': logs['target_reached_in_steps'],\n 'episode_reward': logs['episode_reward'],\n **metrics_dict\n }\n )\n\n # Free up resources.\n del self.episode_start[episode]\n del self.observations[episode]\n del self.rewards[episode]\n del self.actions[episode]\n del self.metrics[episode]", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def _plot_metrics(self):\n if len(self._episode_q_means) > 0:\n mean_q = np.asscalar(np.mean(self._episode_q_means))\n self._metrics_writer.write_value('Mean Q per ep.', mean_q, self._num_actions_taken)\n\n if len(self._episode_q_stddev) > 0:\n std_q = np.asscalar(np.mean(self._episode_q_stddev))\n self._metrics_writer.write_value('Mean Std Q per ep.', std_q, self._num_actions_taken)\n\n self._metrics_writer.write_value('Sum 
rewards per ep.', sum(self._episode_rewards), self._num_actions_taken)", "def set_metrics(self):", "def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status", "def generate_process_statistics(collectl_playback_cli, pid, statistics=DEFAULT_STATISTICS):\n with tempfile.NamedTemporaryFile() as tmp_tsv:\n collectl_playback_cli.run(stdout=tmp_tsv)\n with open(tmp_tsv.name, \"r\") as tsv_file:\n return _read_process_statistics(tsv_file, pid, statistics)", "def alloc_proc(self, process, delta_t):\n\t\tself._process_list.append(process)", "def record_metrics(metrics, args):\n with open('interpretation_metrics/model_metrics_{}'.format(args.file_num), 'a') as f:\n f.write(\"META DATA\\n\")\n f.write(\"---------\\n\")\n f.write(\"Model Name: {}\\n\".format(args.model_name))\n f.write(\"Attack Target: {}\\n\".format(args.attack_target))\n f.write(\"Gradient Model File: {}\\n\".format(args.gradient_model_file))\n f.write(\"Predictive Model File: {}\\n\".format(args.predictive_model_file))\n f.write(\"Cuda: {}\\n\".format(args.cuda))\n\n f.write(\"\\nSIMPLE GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSIMPLE GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['simple_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nSMOOTH GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"----------------------------------------\\n\")\n for key, val in metrics['smooth_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n 
f.write(\"\\nINTEGRATED GRADIENT COMBINED MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_combined'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))\n\n f.write(\"\\nINTEGRATED GRADIENT BASELINE MODEL METRICS\\n\")\n f.write(\"--------------------------------------------\\n\")\n for key, val in metrics['integr_gradient_baseline'].items():\n f.write(\"{}: {:.3f}\\n\".format(key, val))", "def _log_process(ip_or_url,\n username,\n password,\n database,\n series_name,\n port=8086,\n polling_interval=1,\n retention_duration=MEASUREMENTS_RETENTION_DURATION,\n **tags):\n logger = _logger()\n logger.info('Logging GPU to Database {}'.format(ip_or_url))\n\n kwargs = {'series_name': series_name,\n 'polling_interval': polling_interval,\n 'retention_duration': retention_duration}\n kwargs.update(tags)\n p = Process(target=_start_logger_process,\n args=(ip_or_url,\n port,\n username,\n password,\n database),\n kwargs=kwargs)\n p.start()\n yield p\n p.terminate()\n p.join()", "def phist():\n history = hist();\n for line in history:\n print(line, \":\", history[line])", "def process_details(self) -> List[ClaraProcessDetails]:\r\n return self._process_details", "def collect_metrics(params, start, uuid=str(uuid4())):\n list_of_metrics = ['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])", "def fit_history(self) -> FitResultHelper:\n pass", "def retrieve_execution_info(self, process):\n self._start = process.start\n self._end = process.end\n self._success = process.success\n self.log_stdout = process.log_stdout\n self.log_stderr = process.log_stderr\n self._reserved_path = process._reserved_path", "def get_app_history_metrics(self, cluster_id, app_alias):\n\n resp = self.http.get(url_maker(\"/clusters\", cluster_id, \"apps\",\n app_alias, \"monitor\"))\n\n return self.process_data(resp)", "def visdom_send_metrics(vis, metrics, update='replace'):\n visited = {}\n\n sorted_metrics = sorted(metrics.columns, key=_column_original_name)\n for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name):\n metric_list = list(metric_list)\n\n for metric in metric_list:\n if 
vis.win_exists(metric_basename) and (not visited.get(metric, False)):\n update = update\n elif not vis.win_exists(metric_basename):\n update = None\n else:\n update = 'append'\n\n vis.line(\n metrics[metric].values,\n metrics.index.values,\n win=metric_basename,\n name=metric,\n opts={\n 'title': metric_basename,\n 'showlegend': True\n },\n update=update\n )\n\n if metric_basename != metric and len(metric_list) > 1:\n if vis.win_exists(metric):\n update = update\n else:\n update = None\n\n vis.line(\n metrics[metric].values,\n metrics.index.values,\n win=metric,\n name=metric,\n opts={\n 'title': metric,\n 'showlegend': True\n },\n update=update\n )" ]
[ "0.6638309", "0.61203325", "0.6019831", "0.59060955", "0.59058285", "0.55844504", "0.54852", "0.54657155", "0.53771794", "0.53647846", "0.5356617", "0.5345713", "0.5341496", "0.53098404", "0.52967745", "0.5279151", "0.5278557", "0.52188367", "0.52059686", "0.51821625", "0.5178401", "0.5152216", "0.514435", "0.5143143", "0.5109523", "0.5090498", "0.5074378", "0.5074166", "0.5062749", "0.506044", "0.5056128", "0.50535876", "0.50473636", "0.5047177", "0.5040051", "0.50246114", "0.50116587", "0.5004661", "0.49928007", "0.49833524", "0.49828324", "0.49775076", "0.49549216", "0.49462426", "0.49381846", "0.4935586", "0.49346375", "0.4932442", "0.49323902", "0.4928483", "0.49256298", "0.49212977", "0.4918537", "0.49124977", "0.49112773", "0.49050876", "0.49040034", "0.48866156", "0.48691803", "0.48665774", "0.48647392", "0.48541212", "0.4852221", "0.48494646", "0.48425427", "0.4835012", "0.4833814", "0.48319367", "0.4824286", "0.48204595", "0.48194665", "0.48169306", "0.48116452", "0.48084137", "0.48059964", "0.4804874", "0.4804874", "0.48041958", "0.48040575", "0.47981668", "0.47939447", "0.47923812", "0.4791459", "0.4790116", "0.4786359", "0.47822955", "0.47723907", "0.47707638", "0.47619444", "0.4761592", "0.47597197", "0.47532016", "0.4749985", "0.47457066", "0.4743009", "0.4732138", "0.4720513", "0.47200802", "0.4719913", "0.47173506" ]
0.7013191
0
At the beginning of each process metric calculation, the absolute (non-cumulative) metrics need to be overwritten with the combined result of the current process(es). Only the cumulative metrics need the previous value to calculate a delta, so we should set the absolute metrics to 0 at the beginning of this "epoch".
def _reset_absolute_metrics(self):
        for pid, process_metrics in self.__metrics_history.items():
            for _metric, _metric_values in process_metrics.items():
                if not _metric.is_cumulative:
                    self.__aggregated_metrics[_metric] = 0
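For context, a minimal self-contained sketch of how a reset like this is typically paired with the per-epoch aggregation step (compare `_calculate_aggregated_metrics` in the snippets below); the class, metric names, and sample values here are assumptions for illustration, not part of this dataset row:

# Hypothetical sketch -- names and values are assumptions, not from the dataset row.
from collections import namedtuple

Metric = namedtuple("Metric", ["name", "is_cumulative"])

CPU = Metric("cpu_jiffies", True)    # cumulative: report the delta of the last two samples
RSS = Metric("rss_bytes", False)     # absolute: report only the latest sampled value

class ProcessMetricAggregator:
    def __init__(self):
        self.metrics_history = {}      # pid -> {Metric: [samples...]}
        self.aggregated_metrics = {}   # Metric -> aggregated value for the current epoch

    def _reset_absolute_metrics(self):
        # Start of an epoch: zero the absolute metrics so the latest values of the
        # currently running processes overwrite them cleanly.
        for pid, process_metrics in self.metrics_history.items():
            for metric in process_metrics:
                if not metric.is_cumulative:
                    self.aggregated_metrics[metric] = 0

    def _aggregate(self, running_pids):
        for pid, process_metrics in self.metrics_history.items():
            for metric, values in process_metrics.items():
                self.aggregated_metrics.setdefault(metric, 0)
                if pid not in running_pids:
                    continue
                if metric.is_cumulative:
                    if len(values) > 1:
                        self.aggregated_metrics[metric] += values[-1] - values[-2]
                else:
                    self.aggregated_metrics[metric] += values[-1]

    def collect_epoch(self, running_pids):
        self._reset_absolute_metrics()
        self._aggregate(running_pids)
        return dict(self.aggregated_metrics)

# Example usage (made-up sample data): cumulative CPU reports 60, absolute RSS reports 4096.
agg = ProcessMetricAggregator()
agg.metrics_history = {1234: {CPU: [100, 160], RSS: [2048, 4096]}}
print(agg.collect_epoch(running_pids={1234}))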
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]", "def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)", "def getCummulativeValues(self):\n self.cumulativePhaseHeightInRing1 = np.cumsum(self.phaseHeightInRing1)\n self.cumulativePhaseHeightInRing2 = np.cumsum(self.phaseHeightInRing2)\n self.cumulativeLeftCriticalPointsRing1 = np.cumsum(self.leftCriticalPointsRing1)\n self.cumulativeRightCriticalPointsRing1 = np.cumsum(self.rightCriticalPointsRing1)\n self.cumulativeLeftCriticalPointsRing2 = np.cumsum(self.leftCriticalPointsRing2)\n self.cumulativeRightCriticalPointsRing2 = np.cumsum(self.rightCriticalPointsRing2)\n\n if(self.init1 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing1):\n self.cumulativeLeftCriticalPointsRing1[index] = value + self.init1\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing1):\n self.cumulativeRightCriticalPointsRing1[index] = value + self.init1\n\n if(self.init2 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing2):\n self.cumulativeLeftCriticalPointsRing2[index] = value + self.init2\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing2):\n self.cumulativeRightCriticalPointsRing2[index] = value + self.init2\n\n self.cumulativePhaseHeightInRing1 = np.insert(self.cumulativePhaseHeightInRing1, 0, 0.0)\n self.cumulativePhaseHeightInRing2 = np.insert(self.cumulativePhaseHeightInRing2, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing1 = np.insert(self.cumulativeLeftCriticalPointsRing1, 0, 0.0)\n self.cumulativeRightCriticalPointsRing1 = np.insert(self.cumulativeRightCriticalPointsRing1, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing2 = np.insert(self.cumulativeLeftCriticalPointsRing2, 0, 0.0)\n self.cumulativeRightCriticalPointsRing2 = np.insert(self.cumulativeRightCriticalPointsRing2, 0, 0.0)", "def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)", "def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() 
== \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n #would be nice to have dt^{n+1} alone\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#figure this out\n #mwf debug\n logEvent(\"HaukeSangalliTrackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n #mwf should be\n q[('mt',ci)] -= self.subgridTmp[ci]\n #don't think this matters right now because called after calculateSubgridError\n self.subgridTmp_ip[ci][:] = self.subgridError_ip_last[ci]\n self.subgridTmp_ip[ci] *= dtInv\n self.subgridTmp_ip[ci] *= self.subgridErrorMassCoef_ip_last[ci]#figure this out\n self.cip[('mt',ci)] -= self.subgridTmp_ip[ci]", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"ADR trackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),10)\n\n q[('mt',ci)] -= self.subgridTmp[ci]", "def processed_cum_overall(self):\n self.processed_cum_overall = (\n self.cumulative_stats_for_team_each_year\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )", "def calculate_batch_metrics(self):\n pass", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) 
/ self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(1,self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"NS_ASGS trackSubScales accumulating delta u^n ci=%s .abs.max= %s dm.max=%s \" % (ci,max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n\n q[('mt',ci)] -= self.subgridTmp[ci]", "def calc_stat_values(self):", "def PostTrainingStepUpdate(self):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.ops.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.ops.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def on_epoch_begin(self, epoch, logs={}):\n self.current_progress = 0\n self.loss = 0\n self.accuracy = 0", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in 
range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True", "def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")", "def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0", "def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)", "def precalculate():\n pass", "def precalculate():\n pass", "def compute_metrics(self):\n pass", "def office_calculate_cumulative_misfit(parser, args, params):\n local_args = parser.parse_known_args(args)\n control.calculate_cumulative_misfit(params)", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def reduce_run():", "def calculate_before_process(self):\n typ = self.duration.get('type')\n val = self.duration.get('value')\n\n if self.process == \"Like\":\n if typ == \"by_time\":\n self.media_to_like = round(val*self.limits_per_hour.get('like'))\n elif typ == \"by_likes\":\n self.media_to_like = round(val)\n\n elif self.process == \"Like-and-follow\":\n if typ == \"by_time\":\n self.users_to_follow = round(val*self.limits_per_hour.get('follow'))\n elif typ == \"by_users\":\n self.users_to_follow = round(val)", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n if group['momentum'] != 0:\n 
self.state[p][\"momentum_buffer\"] = torch.zeros_like(\n p, device=\"cpu\"\n ).to(p.device)", "def on_epoch_end(self):\n self.current_metric = self.model.metric(self.metric_fn, self.data)\n self.current_epoch += 1\n self.metrics += [self.current_metric]\n self.epochs += [self.current_epoch]\n if self.verbose:\n print('Epoch {} \\t{}: {}'.format(\n self.current_epoch,\n self.metric_name,\n self.current_metric))", "def cum_sum(self):\n\n # create cdo command and runit\n cdo_command = \"cdo -timcumsum\"\n run_this(cdo_command, self, output=\"ensemble\")", "def set_offset_for_processes(self):\n processes = self.get_processes()\n if (len(processes) == 0):\n print \"Not enough servers up yet. Cannot synchronize clocks.\"\n return \"Cannot synchronize clocks yet.\"\n servers = list(processes.itervalues())\n\n local_time = time.time()\n times = [server.get_time_in_seconds() for server in servers]\n avg_time = (sum(times) + local_time)/(len(times) + 1.0)\n\n self.offset = avg_time - local_time\n for s, t in zip(servers, times):\n s.set_offset(avg_time - t)\n\n t = Timer(5, self.set_offset_for_processes)\n t.daemon = True\n t.start()\n return \"Clocks synchronized.\"", "def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n self.state[p][\"sum\"] = torch.full_like(\n p,\n group[\"initial_accumulator_value\"],\n memory_format=torch.preserve_format,\n device=\"cpu\",\n ).to(p.device)", "def do_counter_conversion(self):\r\n if self.is_counter:\r\n if self._previous_counter_value is None:\r\n prev_value = self.latest_value\r\n else:\r\n prev_value = self._previous_counter_value\r\n self._previous_counter_value = self.latest_value\r\n self.latest_value = self.latest_value - prev_value", "def pre_process(self):\n t1_start = perf_counter()\n wav_arr_raw = np.array(self.raw_data['spectrum_0'].attrs['wavelengths'])\n self.wavelengths = wav_arr_raw\n self.back_spectra_arr = np.array(self.raw_data['spectrum_0'].attrs['background'])\n\n corr_data = []\n times_proc = []\n\n # extract reference point for 0 seconds\n time_ref = str(self.raw_data['spectrum_0'].attrs['creation_timestamp'])\n\n # spectrometer adds 'b' and quotation marks to timestamps that must be removed\n # some spectra are taken on X.000000s which does not have a .%f component - use try and except\n try:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n\n print('Measurement was started at {}, \\n normalising times and applying a background correction \\n'.format(time_ref))\n\n # applies background correction\n for counter, spectra in enumerate(self.raw_data.keys()):\n corr_data.append(self.raw_data[spectra]-self.back_spectra_arr)\n time = str(self.raw_data[spectra].attrs['creation_timestamp'])\n try:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n deltatime = time - time_ref\n times_proc.append(deltatime.total_seconds())\n\n self.times = np.array(times_proc)\n print('Measurement contains {} spectra with {} wavelengths \\n'.format(len(self.times),len(self.wavelengths)))\n\n # data is stored as a pd Dataframe with elapsed times as indices and wavelengths as columns\n pre_proc_data = pd.DataFrame(corr_data, index = self.times, columns = self.wavelengths)\n\n # data may be disordered in 
time when iterated through\n # sort the data by elapsed time\n self.pre_proc_data = pre_proc_data.sort_index(axis=0)\n self.times = np.sort(self.times)\n\n t1_stop = perf_counter()\n print(\"Elapsed time for pre-processing:\", t1_stop-t1_start)\n\n return self.pre_proc_data", "def reset(self):\n self.num_inst = 0\n self.sum_metric = 0.0", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def update_relative_weight(self):\n self.relative_weight = 1\n # Add up all of the historical cpu datapoints (higher CPU = more weight)\n for i in self.cpu_datapoints:\n self.relative_weight += i\n # Multiply by the status value (so VMs with red alarm have most weight)\n self.relative_weight *= (self.heartbeat_status * 10)", "def PostTrainingStepUpdate(self, global_step):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)", "def _trigger(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def update_total_fpmu_dict(self):\n # identical for each long-range connection\n # extract parameters\n deltat = self.dt\n trise = self.tau_r\n tdamp = self.tau_d\n\n tr = deltat/trise\n etr = np.exp(-tr) \n td = deltat/tdamp\n etd = np.exp(-td)\n cst = trise/(tdamp-trise)\n\n # nmda should keep in memory which could not be reset to zerooooooo!!!\n \"\"\"\n no resetting to zero --> go directly to refreshing !!! 
based on pre-value\n \"\"\"\n for c in self.source_connection_list:\n if (c.conn_type == 'LongRange'):\n self.total_INMDA_dict[c.connection_distribution] = self.total_INMDA_dict[c.connection_distribution] * etd + self.total_HNMDA_dict[c.connection_distribution] * cst\n self.total_HNMDA_dict[c.connection_distribution] = self.total_HNMDA_dict[c.connection_distribution] * etr + c.curr_firing_rate * c.nsyn * c.weights * self.tau_r\n\n print 'Change HNMDA: ', c.curr_firing_rate * c.nsyn ,' \\n'\n print 'Inputlr dict: ', self.total_inputlr_dict[c.connection_distribution]\n\n\n\n # for curr_CD in self.source_connection_list:\n # have already exist\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] = 0.0\n # have already clear up all the short range connections\n for c in self.source_connection_list:\n if(c.conn_type == 'ShortRange'):\n self.total_fpmu_dict[c.connection_distribution] += c.curr_firing_rate * c.nsyn * c.weights\n\n # summation\n self.total_fp_vslave = 0.0\n for key,val in self.total_fpmu_dict.items():\n \n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then, summation of Inmda\n for key,val in self.total_INMDA_dict.items():\n try:\n self.total_fp_vslave += val\n except:\n key.initialize()\n self.total_fp_vslave += val\n # and then divided by gL or multiply tau_m\n self.total_fp_vslave = self.total_fp_vslave * self.tau_m", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()", "def update_dev_scores(self, results):\n for metric in results:\n print(\"Validation {} -> {} | lr: {}\".format(self.vctr, metric, self.optimizer.param_groups[0]['lr']), flush=True)\n self.val_scores[metric.name].append(metric)\n self.cur_bests[metric.name] = self.best_score(self.val_scores[metric.name])\n\n self.tensorboard_writter.add_scalar(\n f\"/dev/{metric.name}\", metric.score, self.vctr\n )\n\n # Decrease learning rate according to the scheduler policy\n if self.opti_scheduler and metric.name == self.early_metric:\n mode = \"max\" if metric.higher_better else \"min\"\n self.opti_scheduler._init_is_better(mode,\n self.opti_scheduler.threshold,\n self.opti_scheduler.threshold_mode)\n\n self.opti_scheduler.step(metric.score)", 
"def _force_updates():\n float_stats_iter = tf.cast(stats_iter, tf.float32)\n update_moving_mean = tf.assign(moving_mean,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_mean) + (\n (1 / (float_stats_iter + 1)) * batch_mean))\n update_moving_variance = tf.assign(moving_variance,\n ((float_stats_iter / (float_stats_iter + 1)) * moving_variance) + (\n (1 / (float_stats_iter + 1)) * batch_var))\n with tf.control_dependencies([update_moving_mean, update_moving_variance]):\n return tf.identity(outputs)", "def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)", "def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np", "def compute_stats(self):\n if self.stats is not None:\n return\n self.stats = np.zeros(STEPS_MAX + 1)\n for m in self.missions:\n m.compute_stats()\n self.stats += 100 * m.stats\n self.stats /= len(self.missions)", "def calculate_abs(self):\n ref_spectra_raw = np.array(self.raw_data['spectrum_0'].attrs['reference'])\n self.ref_spectra_arr = np.subtract(ref_spectra_raw,self.back_spectra_arr)\n abs=-np.log10(self.pre_proc_data.div(self.ref_spectra_arr))\n self.abs_data=abs\n return self.abs_data", "def update(self, process_value, current_time): \n delta_time = current_time - self.__last_time\n # Don't update if the datapoint is the same or older than the previous one\n if delta_time <= 0:\n return self.output\n error = process_value - self.__SetPoint\n delta_error = error - self.__last_error\n\n self.__PTerm = self.__Kp * error\n\n self.__ITerm += error * delta_time\n\n if (self.__ITerm < -self.__windup_guard):\n self.__ITerm = -self.__windup_guard\n elif (self.__ITerm > self.__windup_guard):\n self.__ITerm = self.__windup_guard\n\n self.__DTerm = delta_error / delta_time\n\n # Update time and error values\n self.__last_time = current_time\n self.__last_error = error\n\n self.output = self.__PTerm + (self.__Ki * self.__ITerm) + (self.__Kd * self.__DTerm)\n return self.output", "def on_epoch_end(self):\n self.current_params = self.model.posterior_mean(self.params)\n self.current_epoch += 1\n self.parameter_values += 
[self.current_params]\n self.epochs += [self.current_epoch]", "def moment_update(model, model_ema, m):\r\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\r\n p2.data.mul_(m).add_(1 - m, p1.detach().data)\r\n # p2.data.mul_(m).add_(1 - m, p1.data)", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def _update_stats(self) -> None:\n # Mass (average height in m), only consider pixels with ice\n self.mass = np.append(self.mass, np.mean(self.h[self.h > 0]))\n\n # Difference in mass 'mass balance'\n self.mass_balance = np.append(\n self.mass_balance, (self.mass[-1] - self.mass[-2])\n )\n\n # Calculate trend of mass balance (take last MODEL_TREND_SIZE elements)\n self.mass_balance_trend = np.append(\n self.mass_balance_trend,\n np.mean(self.mass_balance[-self.MODEL_TREND_SIZE :]), # noqa: E203\n )", "def step(self, actions):\n\n representative_data = []\n original_data = []\n\n actions = np.array(actions).reshape(3, -1)\n\n for np_data, df_data, calculator, som, action in zip(self.np_data_list, self.df_data_list, self.calculators, self.som_objects, actions):\n\n representative_days, cluster_numbers = calculator.get_representative_days(\n som, np_data, action)\n\n representative_days = pd.DataFrame(representative_days)\n\n representative_days = self.wide_to_long(representative_days)\n approximation_calc = ApproximateData(df_data, 4)\n representative_days = ApproximateData(df_data, 4).get_load_duration_curve(\n representative_days, cluster_numbers)\n\n representative_data.append(representative_days)\n\n # original_days = approximation_calc.get_load_duration_curve(\n # year=\"2013\")\n\n\n\n # original_data.append(original_days)\n\n # metrics_calculator = Metrics(original_data[0], representative_data[0], original_data[1],\n # representative_data[1], original_data[2], representative_data[2], \"dc\")\n\n pv_original = pd.read_csv(\n '{}data/processed/resources/pv_processed.csv'.format(project_dir))\n wind_original = pd.read_csv(\n '{}data/processed/resources/onshore_processed.csv'.format(project_dir))\n load_original = pd.read_csv(\n '{}data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n pv_original_ldcs, wind_original_ldcs, load_original_ldcs = get_each_ldc(pv_original, wind_original, load_original)\n\n multi_year_metrics_calculator = MultiYearMetrics(pv_original_ldcs, representative_data[0], wind_original_ldcs, representative_data[1], load_original_ldcs, representative_data[2], self.year_start)\n multi_year_metrics = multi_year_metrics_calculator.get_multi_year_average_metrics(\"dc\")\n multi_year_metrics = multi_year_metrics.reset_index()\n # logger.debug(\"multi_year_metrics: \\n{}\".format(multi_year_metrics))\n\n nrmse = multi_year_metrics[multi_year_metrics['metric'] == 'nrmse dc'].iloc[0].value\n rae = multi_year_metrics[multi_year_metrics['metric'] == 'rae dc'].iloc[0].value\n correlation = multi_year_metrics[multi_year_metrics['metric'] == 'correlation'].iloc[0].value\n\n # error_metrics = metrics_calculator.get_mean_error_metrics()\n # nrmse = error_metrics.iloc[1].value\n # rae = error_metrics.iloc[2].value\n # correlation = error_metrics.iloc[0].value\n # reward = -error_metrics.value.sum()\n # logger.info(\"error_metrics: {}\".format(error_metrics))\n # logger.info(\"error_metrics: {}\".format(error_metrics.iloc[0]))\n\n # return reward\n return nrmse, rae, correlation", "def __deal_per_ep(self):\r\n if self.ep > self.ep_last:\r\n self.ep_last = self.ep\r\n\r\n val_loss, 
val_eer = self.eval_val(logs_print=True)\r\n\r\n self.upd_checkpoints(val_eer)\r\n\r\n with self.writers['ep']['val_eer'].as_default():\r\n tf.summary.scalar('ep', data=val_eer, step=self.ep)\r\n\r\n with self.writers['batch']['val_loss'].as_default():\r\n tf.summary.scalar('batch', data=val_loss, step=self.b_gl)\r\n with self.writers['batch']['val_eer'].as_default():\r\n tf.summary.scalar('batch', data=val_eer, step=self.b_gl)\r\n else:\r\n assert self.ep == self.ep_last", "def update_and_calculate(self, value):\r\n retval = -1\r\n diff = abs(self.ewma - value)\r\n if self.n >= 5: # only calculate meandevs if collected > 5 data pts.\r\n if self.ewmmd > 0:\r\n meandevs = diff/self.ewmmd\r\n else:\r\n meandevs = diff/.00001\r\n retval = meandevs\r\n \r\n # update ewma/ewmmd\r\n self.n += 1\r\n if self.n > 1:\r\n if self.n > 2:\r\n self.ewmmd = (.125*diff) + (.875*self.ewmmd)\r\n else:\r\n self.ewmmd = diff\r\n self.ewma = (.125*value) + (.875*self.ewma)\r\n else:\r\n self.ewma = value\r\n return retval", "def analysis_dev_sec(self):\n #calc the date\n time_now = int(time.time())\n time_local = time.localtime(time_now)\n date = time.strftime(\"%Y-%m-%d\",time_local)\n sum_cpu_ratio = 0\n sum_gpu_mem_size = 0\n # key: time key\n key_re_time = \"[0-9]+ [0-9]+:[0-9]+:[0-9]+ 20[12][][0-9]\"\n # key: temperature key\n key_re_temper = \"[0-9]+C\"\n # key: gpu percent key\n key_re_percent = \"[0-9]+%\"\n # key: gpu mem key\n key_re_mem = \"%s\" % self.pid\n key_re_mem_null = \"No running processes found\"\n # key: line ending key\n key_ending = \"====ending====\"\n\n new_gpu_data_count = 0\n sum_gpu_usage_percent_all = 0\n for line in self.file_gpu.readlines():\n if re.search(key_re_time, line):\n # time own unit\n # 1. colect the gpu time info\n final_time = date + \" \" + line.split()[3]\n self.gpu_pertime.append(final_time)\n elif re.search(key_re_temper, line) and re.search(key_re_percent, line):\n #print \"2222, data_line: %s\" % line\n # 2. colect the gpu temperature info\n # 3. colect the gpu usage percentage info\n temper = float(line.split()[2].rstrip(\"C\"))\n gpu_usage = float(line.split()[12].rstrip(\"%\"))\n if new_gpu_data_count == 0:\n self.gpu_temper_1.append(temper)\n self.gpu_usage_percent_1.append(gpu_usage)\n elif new_gpu_data_count == 1:\n self.gpu_temper_2.append(temper)\n self.gpu_usage_percent_2.append(gpu_usage)\n elif new_gpu_data_count == 2:\n self.gpu_temper_3.append(temper)\n self.gpu_usage_percent_3.append(gpu_usage)\n elif new_gpu_data_count == 3:\n self.gpu_temper_4.append(temper)\n self.gpu_usage_percent_4.append(gpu_usage)\n new_gpu_data_count += 1\n elif re.search(key_re_mem, line) or re.search(key_re_mem_null, line):\n # 4. 
colect the gpu mem info\n this_gpu_num = line.split()[1]\n if \"MiB\" in line.split()[5]:\n this_gpu_mem = float(line.split()[5].strip(\"MiB\"))\n # TODO_this: if there have other unit\n\n if this_gpu_num == \"0\":\n self.gpu_mem_1.append(this_gpu_mem)\n elif this_gpu_num == \"1\":\n self.gpu_mem_2.append(this_gpu_mem)\n elif this_gpu_num == \"2\":\n self.gpu_mem_3.append(this_gpu_mem)\n elif this_gpu_num == \"3\":\n self.gpu_mem_4.append(this_gpu_mem)\n elif this_gpu_num == \"No\":\n self.gpu_mem_1.append(0)\n self.gpu_mem_2.append(0)\n self.gpu_mem_3.append(0)\n self.gpu_mem_4.append(0)\n \n elif re.search(key_ending, line):\n # control unit\n # 1.complete the gpu_mem list\n max_len_gpu_mem = max(len(self.gpu_mem_4), len(self.gpu_mem_3), len(self.gpu_mem_2), len(self.gpu_mem_1))\n min_len_gpu_mem = min(len(self.gpu_mem_4), len(self.gpu_mem_3), len(self.gpu_mem_2), len(self.gpu_mem_1))\n if max_len_gpu_mem != min_len_gpu_mem:\n if len(self.gpu_mem_1) != max_len_gpu_mem:\n self.gpu_mem_1.append(0)\n if len(self.gpu_mem_2) != max_len_gpu_mem:\n self.gpu_mem_2.append(0)\n if len(self.gpu_mem_3) != max_len_gpu_mem:\n self.gpu_mem_3.append(0)\n if len(self.gpu_mem_4) != max_len_gpu_mem:\n self.gpu_mem_4.append(0)\n new_gpu_data_count = 0\n\n # ! because all the list is equal\n for i in range(len(self.gpu_mem_1)):\n self.gpu_usage_percent_all.append(self.gpu_usage_percent_1[i] + self.gpu_usage_percent_2[i] + self.gpu_usage_percent_3[i] + self.gpu_usage_percent_4[i])\n\n #self.gpu_mem_all.append(self.gpu_mem_1[i] + self.gpu_mem_2[i] + self.gpu_mem_3[i] + self.gpu_mem_4[i])\n self.gpu_mem_all.append(self.gpu_mem_1[i] + self.gpu_mem_2[i] + self.gpu_mem_3[i] + self.gpu_mem_4[i])\n sum_gpu_mem_size += max(self.gpu_mem_1[i], self.gpu_mem_2[i], self.gpu_mem_3[i], self.gpu_mem_4[i])\n\n self.gpu_temper_max.append(max(self.gpu_temper_1[i] ,self.gpu_temper_2[i] ,self.gpu_temper_3[i] ,self.gpu_temper_4[i]))\n\n version_gpu_usage_percent_all = max(self.gpu_usage_percent_all)\n\n version_gpu_mem_all = max(self.gpu_mem_all)\n version_gpu_mem_avg = round(sum_gpu_mem_size/len(self.gpu_mem_all), 2)\n\n version_gpu_temper_max = max(self.gpu_temper_max)\n\n print \"version_gpu_usage_percent_all: %s\" % version_gpu_usage_percent_all\n print \"version_gpu_mem_all: %s\" % version_gpu_mem_all\n print \"version_gpu_mem_avg: %s\" % version_gpu_mem_avg\n print \"version_gpu_temper_max: %s\" % version_gpu_temper_max\n\n # insert into database: nvidia_list_1sec\n if self.db_onoff == \"on\":\n # insert into database: nvidia_list_1sec_avg\n self.mysql.insert_table_sql_nvidia_version(self.time_sql, version_gpu_usage_percent_all, version_gpu_mem_avg, version_gpu_temper_max)\n # insert into database: nvidia_list_1sec_max\n #self.mysql.insert_table_sql_nvidia_version(self.time_sql, version_gpu_usage_percent_all, version_gpu_mem_all, version_gpu_temper_max)", "def update(self, phase, targets, outputs):\n iou, dice, dice_neg, dice_pos, _, _ = self.metric(outputs, targets)\n self.base_dice_scores[phase].append(dice)\n self.dice_pos_scores[phase].append(dice_pos)\n self.dice_neg_scores[phase].append(dice_neg)\n self.iou_scores[phase].append(iou)", "def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth 
patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. Metrics were only calculated for the provided dose.')", "def reset(self) -> None:\n self.is_run = False\n self.env_step = 0\n if self.resume_from_log:\n self.start_epoch, self.env_step, self.gradient_step = \\\n self.logger.restore_data()\n\n self.last_rew, self.last_len = 0.0, 0\n self.start_time = time.time()\n if self.train_collector is not None:\n self.train_collector.reset_stat()\n\n if self.train_collector.policy != self.policy:\n self.test_in_train = False\n elif self.test_collector is None:\n self.test_in_train = False\n\n if self.test_collector is not None:\n assert self.episode_per_test is not None\n assert not isinstance(self.test_collector, AsyncCollector) # Issue 700\n self.test_collector.reset_stat()\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.start_epoch,\n self.episode_per_test, self.logger, self.env_step, self.reward_metric\n )\n self.best_epoch = self.start_epoch\n self.best_reward, self.best_reward_std = \\\n test_result[\"rew\"], test_result[\"rew_std\"]\n if self.save_best_fn:\n self.save_best_fn(self.policy)\n\n self.epoch = self.start_epoch\n self.stop_fn_flag = False\n self.iter_num = 0", "def update(self):\n\n for Particle in self.swarm: \n Particle.compareToLocalBest()\n\n self.prevBestVal= self.overBestVal\n self.overBestVal= max(self.overBestVal, max([Particle.bestXYZ[2] for Particle in self.swarm]))\n tempPos= [particle.bestXYZ[0:2] for particle in self.swarm if particle.bestXYZ[2]==self.overBestVal]\n if len(tempPos)>0:\n self.overBestPos = tempPos[0] #In the case that multiple particles have the same max value, use position of 1st particle\n print('Global Best value')\n print(self.overBestVal)\n\n for index in range(len(self.swarm)): \n if self.disp:\n plt.hold(True)\n c = mapping(int(255/(index+1)))\n plt.plot(self.swarm[index].position[0], self.swarm[index].position[1], '*', mfc = c, mec = c) \n self.swarm[index].updateVelocity(self, self.latency) #multiplies velocity by #time-steps until update\n if self.reset != None:\n if self.iteration % self.reset == 0:\n self.swarm[index].position = self.swarm[index].bestXYZ[0:2]\n else:\n 
self.swarm[index].updatePosition()\n else:\n self.swarm[index].updatePosition()\n self.swarm[index].isFeasible()\n self.swarm[index].evaluateFunction()", "def _cache_values(self):\n width = self.width.current\n center = self.horizontal.current\n\n # If center would need to be rounded, increase the width by 1 to make a smoother fade between LEDs.\n if center % 2 != 0:\n width += 1\n # Moving right\n if center < self.horizontal.target:\n center += 1\n # Moving left\n else:\n center -= 1\n\n self.pi_inc = PI_2 / width\n self.first_led = round(center - (width / 2))\n self.last_led = self.first_led + width\n\n # Start at the bottom of the curve, to provide a smooth fade up\n self.start_x = (width * -1.25) * self.pi_inc", "def before_epoch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'metrics.txt'), 'a+') as fout:\n if hasattr(self.trainer, '_metrics'):\n fout.write(\n str(self.trainer._epoch - 1) + '\\t' +\n str(self.trainer._metrics) + '\\n')", "def _init_calculation(self):\n # count number of previous calculations\n self.ctx.running_calc += 1\n\n # set the structure\n self.ctx.inputs.structure = self.inputs.structure\n\n # # deal with oxidation states\n # if self.ctx.running_calc > 1 and self.ctx.try_oxi:\n # self.report('Trying to guess oxidation states')\n # self.ctx.inputs.guess_oxistates = Bool(True)\n # self.ctx.inputs.high_spin_preferred = Bool(self.ctx.high_spin_preferred)\n\n # set metadata\n label = self.inputs.metadata.get('label', DEFAULT_TITLE)\n description = self.inputs.metadata.get('description', '')\n self.ctx.inputs.metadata = AttributeDict({'options': self.ctx.options,\n 'label': '{} [{}]'.format(label, self.ctx.running_calc),\n 'description': description})", "def epoch_logs( self, progress_bar, iteration:int, output: SimpleNamespace, prev_mechanism_weights: List[float], next_mechanism_weights: List[float] ):\n self_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.public_key )\n stake = self.metagraph.S[ self_uid ].item()\n rank = self.metagraph.R[ self_uid ].item()\n incentive = self.metagraph.I[ self_uid ].item()\n info = {\n 'GS': colored('{}'.format(self.global_step), 'red'),\n 'LS': colored('{}'.format(iteration), 'blue'),\n 'Epoch': colored('{}'.format(self.epoch+1), 'green'),\n 'Loss': colored('{:.4f}'.format(self.epoch_loss), 'yellow'),\n 'Best': colored('{:.4f}'.format(self.best_epoch_loss), 'red'),\n 'L-loss': colored('{:.4f}'.format(output.local_target_loss.item()), 'blue'),\n 'R-loss': colored('{:.4f}'.format(output.remote_target_loss.item()), 'green'),\n 'D-loss': colored('{:.4f}'.format(output.distillation_loss.item()), 'yellow'),\n 'nPeers': colored(self.metagraph.n.item(), 'red'),\n 'Stake(\\u03C4)': colored('{:.3f}'.format(stake), 'green'),\n 'Rank(\\u03C4)': colored('{:.3f}'.format(rank), 'blue'),\n 'Incentive(\\u03C4/block)': colored('{:.6f}'.format(incentive), 'yellow'),\n }\n for uid in self.metagraph.uids.tolist():\n if next_mechanism_weights[uid] != 0:\n weight_dif = next_mechanism_weights[uid] - prev_mechanism_weights[uid]\n if weight_dif > 0:\n info[colored(str(uid), 'green')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'green')\n elif weight_dif == 0:\n info[str(uid)] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'white')\n else:\n info[colored(str(uid), 'red')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'red')\n\n progress_bar.set_infos( info )\n\n if self.config.neuron.use_tensorboard:\n self.tensorboard.add_scalar('R-loss', output.remote_target_loss.item(), 
self.global_step)\n self.tensorboard.add_scalar('L-loss', output.local_target_loss.item(), self.global_step)\n self.tensorboard.add_scalar('D-loss', output.distillation_loss.item(), self.global_step)", "def calc(self, current_epoch):\n # calc metrics\n self.avg_loss = self.loss / self.nb_batches\n self.acc = accuracy_score(self.gold_classes, self.pred_classes)\n self.mcc = matthews_corrcoef(self.gold_classes, self.pred_classes)\n *self.prec_rec_f1, _ = precision_recall_fscore_support(\n self.gold_classes,\n self.pred_classes,\n average=self.average,\n pos_label=self.pos_label\n )\n\n # keep track of the best stats\n if self.avg_loss < self.best_loss.value:\n self.best_loss.value = self.avg_loss\n self.best_loss.epoch = current_epoch\n\n if self.prec_rec_f1[2] > self.best_prec_rec_f1.value[2]:\n self.best_prec_rec_f1.value[0] = self.prec_rec_f1[0]\n self.best_prec_rec_f1.value[1] = self.prec_rec_f1[1]\n self.best_prec_rec_f1.value[2] = self.prec_rec_f1[2]\n self.best_prec_rec_f1.epoch = current_epoch\n\n if self.acc > self.best_acc.value:\n self.best_acc.value = self.acc\n self.best_acc.epoch = current_epoch\n\n if self.mcc > self.best_mcc.value:\n self.best_mcc.value = self.mcc\n self.best_mcc.epoch = current_epoch\n\n # useful for debugging:\n # from sklearn.metrics import confusion_matrix\n # print(confusion_matrix(self.gold_classes, self.pred_classes))", "def run_cumulative_pipeline_damage(self):\n\t\t\"\"\" PWP1 = brittle\n\t\t\tPWP2 = ductile \"\"\"\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t# rt = [100]\n\n\t\tfor rt_val in rt:\n\t\t\tprint('\\tmc_pipe_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_eq_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\ttsu_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_tsu_{}yr_{}.csv'\n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\teq_df.set_index('guid', inplace=True)\n\t\t\ttsu_df.set_index('guid', inplace=True)\n\n\t\t\tcolumn_keys = list(eq_df.columns)\n\n\t\t\tcum_df = np.logical_or(eq_df.values, tsu_df.values).astype(int)\n\t\t\tcum_df = pd.DataFrame(cum_df, index=eq_df.index, columns=column_keys)\n\t\t\t\n\n\t\t\tresult_name = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t 'pipe_DS_cumulative_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t.format(rt_val, retrofit_key))\n\n\t\t\tcum_df.to_csv(result_name, index=True)", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def reset(self, complete=False):\n self.sum = 0\n self.n = 0\n if complete:\n self.running_avg = []", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def update(self, current, values=None, finalize=None):\n if finalize is None:\n if self.target is None:\n finalize = False\n else:\n finalize = current >= self.target\n\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n # In the case that progress bar doesn't have a target value in the first\n # epoch, both on_batch_end and on_epoch_end will be called, which will\n # cause 'current' and 'self._seen_so_far' to have the 
same value. Force\n # the minimal value to 1 here, otherwise stateful_metric will be 0s.\n value_base = max(current - self._seen_so_far, 1)\n if k not in self._values:\n self._values[k] = [v * value_base, value_base]\n else:\n self._values[k][0] += v * value_base\n self._values[k][1] += value_base\n else:\n # Stateful metrics output a numeric value. This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if now - self._last_update < self.interval and not finalize:\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' * (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n\n if self.target is None or finalize:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += ' %.0fs/%s' % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)\n else:\n info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)\n else:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60, eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if finalize:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if finalize:\n numdigits = int(np.log10(self.target)) + 1\n count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def _compute_raw_update(self):\n\n self.print(\"SGD with Momentum: Computing raw update...\", line_above=True)\n # Read task toml\n\n iteration_number = self.task_dict[\"iteration_number\"] + 1\n\n indices = self.get_parameter_indices(self.raw_gradient_path)\n # scale the gradients, because they can be tiny and this leads to issues\n g_t = self.get_h5_data(self.raw_gradient_path) * self.grad_scaling_fac\n\n if np.sum(np.isnan(g_t)) > 1:\n raise Exception(\n \"NaNs were found in the raw gradient.\" \"Something must be wrong.\"\n )\n\n if iteration_number == 
1: # Initialize moments if needed\n shutil.copy(self.raw_gradient_path, self.moment_path)\n write_xdmf(self.moment_path)\n\n with h5py.File(self.moment_path, \"r+\") as h5:\n data = h5[\"MODEL/data\"]\n\n # initialize with zeros\n for i in indices:\n data[:, i, :] = np.zeros_like(data[:, i, :])\n\n v_t = self.beta * self.get_h5_data(self.moment_path) + (1 - self.beta) * g_t\n\n # Store first moment\n shutil.copy(\n self.moment_path,\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n )\n self.set_h5_data(\n self._get_path_for_iteration(self.iteration_number + 1, self.moment_path),\n v_t,\n )\n\n # Correct bias\n v_t = v_t / (1 - self.beta ** (self.iteration_number + 1))\n update = self.alpha * v_t\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the raw update.\"\n \"Check if the gradient is not excessively small\"\n )\n\n # Write raw update to file for smoothing\n shutil.copy(self.raw_gradient_path, self.raw_update_path)\n self.set_h5_data(self.raw_update_path, update)", "def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0", "def normalize_obs(self):\n def _compute_traj_stats(traj_obs_dict):\n \"\"\"\n Helper function to compute statistics over a single trajectory of observations.\n \"\"\"\n traj_stats = { k : {} for k in traj_obs_dict }\n for k in traj_obs_dict:\n traj_stats[k][\"n\"] = traj_obs_dict[k].shape[0]\n traj_stats[k][\"mean\"] = traj_obs_dict[k].mean(axis=0, keepdims=True) # [1, ...]\n traj_stats[k][\"sqdiff\"] = ((traj_obs_dict[k] - traj_stats[k][\"mean\"]) ** 2).sum(axis=0, keepdims=True) # [1, ...]\n return traj_stats\n\n def _aggregate_traj_stats(traj_stats_a, traj_stats_b):\n \"\"\"\n Helper function to aggregate trajectory statistics.\n See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n for more information.\n \"\"\"\n merged_stats = {}\n for k in traj_stats_a:\n n_a, avg_a, M2_a = traj_stats_a[k][\"n\"], traj_stats_a[k][\"mean\"], traj_stats_a[k][\"sqdiff\"]\n n_b, avg_b, M2_b = traj_stats_b[k][\"n\"], traj_stats_b[k][\"mean\"], traj_stats_b[k][\"sqdiff\"]\n n = n_a + n_b\n mean = (n_a * avg_a + n_b * avg_b) / n\n delta = (avg_b - avg_a)\n M2 = M2_a + M2_b + (delta ** 2) * (n_a * n_b) / n\n merged_stats[k] = dict(n=n, mean=mean, sqdiff=M2)\n return merged_stats\n\n # Run through all trajectories. 
For each one, compute minimal observation statistics, and then aggregate\n # with the previous statistics.\n ep = self.demos[0]\n obs_traj = {k: self.hdf5_file[\"data/{}/obs/{}\".format(ep, k)][()].astype('float32') for k in self.obs_keys}\n obs_traj = ObsUtils.process_obs(obs_traj)\n merged_stats = _compute_traj_stats(obs_traj)\n print(\"SequenceDataset: normalizing observations...\")\n for ep in LogUtils.custom_tqdm(self.demos[1:]):\n obs_traj = {k: self.hdf5_file[\"data/{}/obs/{}\".format(ep, k)][()].astype('float32') for k in self.obs_keys}\n obs_traj = ObsUtils.process_obs(obs_traj)\n traj_stats = _compute_traj_stats(obs_traj)\n merged_stats = _aggregate_traj_stats(merged_stats, traj_stats)\n\n obs_normalization_stats = { k : {} for k in merged_stats }\n for k in merged_stats:\n # note we add a small tolerance of 1e-3 for std\n obs_normalization_stats[k][\"mean\"] = merged_stats[k][\"mean\"]\n obs_normalization_stats[k][\"std\"] = np.sqrt(merged_stats[k][\"sqdiff\"] / merged_stats[k][\"n\"]) + 1e-3\n return obs_normalization_stats", "def __set_defaults_to_runtime_variables(self) -> None:\n self.current_time_in_eighths = N_EIGHTHS_PER_MEASURE\n self.current_measure_durations = []\n self.past_movements = []\n self.current_motion_start_element = self.counterpoint[0]\n self.is_last_element_consonant = True", "def process(self,timeStamp,values,queueNo):\n\t\tdatain = values[0]\n\t\tcurValue = float(datain)\n\t\tif self.isBaselineRunning:\n\t\t\tself.testMinMax(curValue)\n\t\t\n\t\tif self.hasBaselineEnded:\n\t\t\tscaledValue = self.scale(curValue)\n\t\t\t#print scaledValue\n\t\t\tself.addProcessedValues(scaledValue)", "def __init__(self, total_time, *args, **kwargs):\n super(AdjustingFSNN, self).__init__(*args, **kwargs)\n\n self._adjustment_distribution = [\n (.05, 32, .6),\n (.15, 64, .75),\n (.70, 1024, .98),\n (.10, 4096, .996)\n ]\n self._start_time = None\n self._next_adjustment_index = 0\n\n self._adjustments = [\n (0,\n self._adjustment_distribution[0][1],\n self._adjustment_distribution[0][2])]\n\n time_alloted_so_far = total_time * self._adjustment_distribution[0][0]\n idx = 1\n while idx < len(self._adjustment_distribution):\n self._adjustments.append(\n (time_alloted_so_far,\n self._adjustment_distribution[idx][1],\n self._adjustment_distribution[idx][2]))\n this_allotment = self._adjustment_distribution[idx][0]\n time_alloted_so_far += total_time * this_allotment\n idx += 1", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def do_normal():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tfor key in tracking[tunit].keys():\n\t value = tracking[tunit][key][\"value\"]\n\t print \"%s.value %f\" % (key, float(value))\n\tprint\n return 0", "def AddInfoAfterRecursive(self):\n \n print('Info about channel:' + str(self.sig))\n startpoints = np.uint64(self.AnalysisResults[self.sig]['RoughEventLocations'][:, 0])\n endpoints = np.uint64(self.AnalysisResults[self.sig]['RoughEventLocations'][:, 1])\n localBaseline = self.AnalysisResults[self.sig]['RoughEventLocations'][:, 2]\n localVariance = self.AnalysisResults[self.sig]['RoughEventLocations'][:, 3]\n for (j,k) in enumerate(startpoints): print(\"%10.7f\"% float(startpoints[j]/self.outputsamplerate))\n CusumBaseline=500\n numberofevents = len(startpoints)\n self.AnalysisResults[self.sig]['StartPoints'] = startpoints\n self.AnalysisResults[self.sig]['EndPoints'] = endpoints\n 
self.AnalysisResults[self.sig]['LocalBaseline'] = localBaseline\n self.AnalysisResults[self.sig]['LocalVariance'] = localVariance\n self.AnalysisResults[self.sig]['NumberOfEvents'] = len(startpoints)\n\n #### Now we want to move the endpoints to be the last minimum for each ####\n #### event so we find all minimas for each event, and set endpoint to last ####\n\n deli = np.zeros(numberofevents)\n dwell = np.zeros(numberofevents)\n limit=500e-6*self.outputsamplerate #0.5 ms\n AllFits={}\n\n for i in range(numberofevents):\n length = endpoints[i] - startpoints[i]\n if length <= limit and length>3:\n # Impulsion Fit to minimal value\n deli[i] = localBaseline[i] - np.min(self.data[self.sig][int(startpoints[i]+1):int(endpoints[i]-1)]) #current drop cuurrent at starting point - current minimal velue\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate #length of event in seconds\n elif length > limit:\n deli[i] = localBaseline[i] - np.mean(self.data[self.sig][int(startpoints[i]+5):int(endpoints[i]-5)])\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate\n # # Cusum Fit\n # sigma = np.sqrt(localVariance[i])\n # delta = 2e-9\n # h = 1 * delta / sigma\n # (mc, kd, krmv) = CUSUM(self.out[self.sig][startpoints[i]-CusumBaseline:endpoints[i]+CusumBaseline], delta, h)\n # zeroPoint = startpoints[i]-CusumBaseline\n # krmv = krmv+zeroPoint+1\n # AllFits['Event' + str(i)] = {}\n # AllFits['Event' + str(i)]['mc'] = mc\n # AllFits['Event' + str(i)]['krmv'] = krmv\n else:\n deli[i] = localBaseline[i] - np.min(self.data[self.sig][startpoints[i]:endpoints[i]])\n dwell[i] = (endpoints[i] - startpoints[i]) / self.outputsamplerate\n\n frac = deli / localBaseline #fraction: current drop / current at start\n dt = np.array(0)\n dt = np.append(dt, np.diff(startpoints) / self.outputsamplerate) # differences between starts of different events (Frequency of events)\n numberofevents = len(dt)\n\n #self.AnalysisResults[self.sig]['CusumFits'] = AllFits\n self.AnalysisResults[self.sig]['FractionalCurrentDrop'] = frac # current drop / current at start \n self.AnalysisResults[self.sig]['DeltaI'] = deli #current drop in nA\n self.AnalysisResults[self.sig]['DwellTime'] = dwell #end[i] - start[i] in sec.\n self.AnalysisResults[self.sig]['Frequency'] = dt # start[i+1] - start[i] in sec.", "def _before_res_compute(abs_data):\n\n data = topi.multiply(abs_data, 1.0 / CONST_LIMIT)\n data_square = mul(data, data, target=utils.CCE)\n before_res = topi.multiply(data_square, ITR_BEFORE[LEN_BEFORE - 1])\n before_res = topi.add(before_res, ITR_BEFORE[LEN_BEFORE - 2])\n for iter_number in ITR_BEFORE[LEN_BEFORE-3::-1]:\n before_res = mul(before_res, data_square, target=utils.CCE)\n before_res = topi.add(before_res, iter_number)\n exp_value = exp(neg(abs_data, target=utils.CCE), target=utils.CCE)\n before_res = mul(before_res, exp_value, target=utils.CCE)\n before_res = mul(before_res, abs_data, target=utils.CCE)\n return before_res", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def _fill_moment_results(self):\n toprocess = [('stock_tom', self.c_stock, 2),\n ('stock_woody', self.c_stock, 3),\n ('stock_non_woody', self.c_stock, 4),\n ('stock_acid', self.c_stock, 5),\n ('stock_water', self.c_stock, 6),\n ('stock_ethanol', self.c_stock, 7),\n ('stock_non_soluble', self.c_stock, 8),\n ('stock_humus', self.c_stock, 9),\n ('change_tom', 
self.c_change, 2),\n ('change_woody', self.c_change, 3),\n ('change_non_woody', self.c_change, 4),\n ('change_acid', self.c_change, 5),\n ('change_water', self.c_change, 6),\n ('change_ethanol', self.c_change, 7),\n ('change_non_soluble', self.c_change, 8),\n ('change_humus', self.c_change, 9),\n ('co2', self.co2_yield, 2)]\n for (resto, dataarr, dataind) in toprocess:\n # filter time steps\n ts = numpy.unique(dataarr[:,1])\n # extract data for the timestep\n for timestep in ts:\n ind = numpy.where(dataarr[:,1]==timestep)\n mean = stats.mean(dataarr[ind[0], dataind])\n mode_res = stats.mode(dataarr[ind[0], dataind])\n mode = mode_res[0]\n var = stats.var(dataarr[ind[0], dataind])\n skew = stats.skew(dataarr[ind[0], dataind])\n kurtosis = stats.kurtosis(dataarr[ind[0], dataind])\n if var>0.0:\n sd2 = 2 * math.sqrt(var)\n else:\n sd2 = var\n res = [[timestep, mean, mode[0], var, skew, kurtosis,\n mean - sd2, mean + sd2]]\n if resto=='stock_tom':\n self.md.stock_tom = numpy.append(self.md.stock_tom,\n res, axis=0)\n elif resto=='stock_woody':\n self.md.stock_woody = numpy.append(self.md.stock_woody,\n res, axis=0)\n elif resto=='stock_non_woody':\n self.md.stock_non_woody = numpy.append(\\\n self.md.stock_non_woody, res, axis=0)\n elif resto=='stock_acid':\n self.md.stock_acid = numpy.append(self.md.stock_acid,\n res, axis=0)\n elif resto=='stock_water':\n self.md.stock_water = numpy.append(self.md.stock_water,\n res, axis=0)\n elif resto=='stock_ethanol':\n self.md.stock_ethanol = numpy.append(self.md.stock_ethanol,\n res, axis=0)\n elif resto=='stock_non_soluble':\n self.md.stock_non_soluble= numpy.append(\n self.md.stock_non_soluble, res, axis=0)\n elif resto=='stock_humus':\n self.md.stock_humus = numpy.append(self.md.stock_humus,\n res, axis=0)\n elif resto=='change_tom':\n self.md.change_tom = numpy.append(self.md.change_tom,\n res, axis=0)\n elif resto=='change_woody':\n self.md.change_woody = numpy.append(self.md.change_woody,\n res, axis=0)\n elif resto=='change_non_woody':\n self.md.change_non_woody = numpy.append(\\\n self.md.change_non_woody, res, axis=0)\n elif resto=='change_acid':\n self.md.change_acid = numpy.append(self.md.change_acid,\n res, axis=0)\n elif resto=='change_water':\n self.md.change_water = numpy.append(self.md.change_water,\n res, axis=0)\n elif resto=='change_ethanol':\n self.md.change_ethanol = numpy.append(\n self.md.change_ethanol, res, axis=0)\n elif resto=='change_non_soluble':\n self.md.change_non_soluble=numpy.append(\n self.md.change_non_soluble, res, axis=0)\n elif resto=='change_humus':\n self.md.change_humus = numpy.append(self.md.change_humus,\n res, axis=0)\n elif resto=='co2':\n self.md.co2 = numpy.append(self.md.co2, res, axis=0)", "def test_epoch_end(self, outputs: Any) -> None:\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()", "def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n 
done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals", "def reset_epoch_cache(self):\n self.epoch_cache = {\"train\":PerformanceBatch(), \n \"val\":PerformanceBatch(), \n \"test\":PerformanceBatch()}", "def zeroise_results(self):\n \n self.result_total_peak.set(0.0)\n self.result_total_avge.set(0.0)\n self.result_total_sd.set(0.0)\n return", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def update(self):\n #*** Get dictionary of NICs with results from psutil:\n os_net = psutil.net_io_counters(pernic=True)\n #*** Update our variables including delta values:\n for interface in os_net:\n #*** Packets in:\n pkts_in = os_net[interface].packets_recv\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_pkts_in:\n self.prev_pkts_in[interface] = 0\n if not interface in self.delta_pkts_in:\n self.delta_pkts_in[interface] = 0\n\n #*** 
Calculate difference in packets in:\n if self.prev_pkts_in[interface]:\n self.delta_pkts_in[interface] = \\\n pkts_in - self.prev_pkts_in[interface]\n else:\n self.delta_pkts_in[interface] = 0\n self.prev_pkts_in[interface] = pkts_in\n\n #*** Packets out:\n pkts_out = os_net[interface].packets_sent\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_pkts_out:\n self.prev_pkts_out[interface] = 0\n if not interface in self.delta_pkts_out:\n self.delta_pkts_out[interface] = 0\n\n #*** Calculate difference in packets out:\n if self.prev_pkts_out[interface]:\n self.delta_pkts_out[interface] = \\\n pkts_out - self.prev_pkts_out[interface]\n else:\n self.delta_pkts_out[interface] = 0\n self.prev_pkts_out[interface] = pkts_out\n\n #*** Bytes in:\n bytes_in = os_net[interface].bytes_recv\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_bytes_in:\n self.prev_bytes_in[interface] = 0\n if not interface in self.delta_bytes_in:\n self.delta_bytes_in[interface] = 0\n\n #*** Calculate difference in bytes in:\n if self.prev_bytes_in[interface]:\n self.delta_bytes_in[interface] = \\\n bytes_in - self.prev_bytes_in[interface]\n else:\n self.delta_bytes_in[interface] = 0\n self.prev_bytes_in[interface] = bytes_in\n\n #*** Bytes out:\n bytes_out = os_net[interface].bytes_sent\n\n #*** Ensure keys in dicts:\n if not interface in self.prev_bytes_out:\n self.prev_bytes_out[interface] = 0\n if not interface in self.delta_bytes_out:\n self.delta_bytes_out[interface] = 0\n\n #*** Calculate difference in bytes out:\n if self.prev_bytes_out[interface]:\n self.delta_bytes_out[interface] = \\\n bytes_out - self.prev_bytes_out[interface]\n else:\n self.delta_bytes_out[interface] = 0\n self.prev_bytes_out[interface] = bytes_out", "def stored_reset(self):\r\n\t\tself.stored_reward = np.zeros((self.num_timesteps - self.first_considered_reward_step,))\r\n\t\tself.stored_optimum = np.zeros_like(self.stored_reward)", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def update_score(self):\n td = self.created - datetime.datetime(1970, 1, 1)\n epoch_seconds = td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)\n order = math.log(max(abs(self.points), 1), 10)\n sign = 1 if self.points > 0 else -1 if self.points < 0 else 0\n seconds = epoch_seconds - 1134028003\n self.score = round(order + sign * seconds / 45000, 7)", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def _deltas(self):\n istat = self.init\n lstat = self.stats\n uptime = self._uptime()\n delta = float(uptime) - float(self.uptime)\n self.uptime = uptime\n \n for dev in lstat.keys():\n if not istat.has_key(dev):\n del lstat[dev]\n continue\n idev = istat[dev]\n ldev = lstat[dev]\n\n for key,value in ldev.items():\n if re.search(r'(^major\\Z|^minor\\Z)',key):\n continue\n \n if not idev.has_key(key):\n print \"Different keys in statistics\"\n sys.exit(1)\n if not str(value).isdigit and \\\n not str(ldev[key]).isdigit(): \n print \"value of key is not a number\"\n sys.exit(1)\n \n if ldev[key] == idev[key]:\n ldev[key] = self._sprintf('%.2f', 0)\n elif int(delta) > 0:\n ldev[key] = self._sprintf('%.2f',float((ldev[key] - idev[key]) / delta))\n else:\n\t ldev[key] = self._sprintf('%.2f', float(ldev[key] - idev[key]))\n idev[key] = value\n return idev", "def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0", "def finalize(self):\n\t\tif self._sum_factor != 0.0:\n\t\t\tself._last_score = self._current_score / 
self._sum_factor\n\t\telse:\n\t\t\tself._last_score = 0.0\n\n\t\tself._scores.append(self._last_score)\n\t\tself._scores = self._scores[-self._range[1]:]\n\t\n\t\tself._sum_factor = 0.0\n\t\tself._current_score = 0.0", "def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)", "def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)" ]
[ "0.66141224", "0.5771144", "0.57508063", "0.5575068", "0.5574594", "0.55501413", "0.5535069", "0.54281497", "0.53845894", "0.535291", "0.5337103", "0.5333742", "0.5300862", "0.5274768", "0.5269434", "0.52690667", "0.52612674", "0.5259409", "0.52576226", "0.5246559", "0.52445793", "0.52266544", "0.52266544", "0.52198565", "0.5217178", "0.5192304", "0.5188704", "0.5176454", "0.5167211", "0.5157337", "0.51542896", "0.515063", "0.514897", "0.5148494", "0.5137996", "0.51337636", "0.5128662", "0.5126366", "0.5119543", "0.5105233", "0.510163", "0.5096093", "0.5095771", "0.50750273", "0.50694275", "0.5037535", "0.5026349", "0.5025746", "0.502334", "0.5021451", "0.5018278", "0.5013243", "0.49944642", "0.49943978", "0.49936217", "0.49888745", "0.49882707", "0.49875024", "0.49864992", "0.4985423", "0.49824795", "0.49774098", "0.49659845", "0.49630666", "0.49610376", "0.4960813", "0.49583998", "0.49490345", "0.49387595", "0.4920942", "0.49206665", "0.49151126", "0.49110135", "0.490633", "0.49035272", "0.49014455", "0.48983124", "0.48964238", "0.4896249", "0.48909265", "0.48814645", "0.48795128", "0.48778415", "0.4877597", "0.48712832", "0.48706457", "0.4866237", "0.4864773", "0.48583356", "0.48559925", "0.4854537", "0.48466745", "0.48460513", "0.48455322", "0.484266", "0.4840621", "0.48358577", "0.48340118", "0.48339003", "0.48339003" ]
0.75371194
0
Calculates the aggregated metric values based on the currently running processes and the historical metric record
def _calculate_aggregated_metrics(self): # using the historical values, calculate the aggregate # there are two kinds of metrics: # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles) # b) absolute metrics - the last absolute value is used running_pids_set = set(self.__pids) for pid, process_metrics in self.__metrics_history.items(): for _metric, _metric_values in process_metrics.items(): if not self.__aggregated_metrics.get(_metric): self.__aggregated_metrics[_metric] = 0 if _metric.is_cumulative: if pid in running_pids_set: if len(_metric_values) > 1: # only report the cumulative metrics for more than one sample self.__aggregated_metrics[_metric] += ( _metric_values[-1] - _metric_values[-2] ) else: if pid in running_pids_set: # absolute metric - accumulate the last reported value self.__aggregated_metrics[_metric] += _metric_values[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_metrics(self):\n pass", "def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0", "def aggregate(global_params, running_aggregate, aggregation_result):\n running_ref = running_aggregate.get_ref('values')\n agg_ref = aggregation_result.get_ref('values')\n for i in range(global_params.dims):\n running_ref[i] += agg_ref[i]\n return running_aggregate", "def calc_stat_values(self):", "def calculate_batch_metrics(self):\n pass", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def calculate(self, data, *args, **kwargs):\n \n # Sets up priority queue, where data is prioritized by date\n queue = []\n \n # Sets up data dictionaries that will be used to contain calculated data\n severity_data = OrderedDict()\n status_data = OrderedDict()\n current_state = { }\n \n # List of fields used\n fields = [PROJECT, TRANS, STATUS, PRIORITY]\n \n # Populates priority queue with appropriate data\n for key, param_data in data.iteritems():\n # Grabs param_data fields\n priority = param_data.get(PRIORITY, None)\n hist = param_data.get(HIST, None)\n proj = param_data.get(PROJECT, self.project)\n \n # Adds the historical statuses of the current JIRA item to the queue\n if (hist):\n for i, date in enumerate(hist[TRANS]):\n heapq.heappush(queue, (date, proj, key, hist[NEW][i], priority))\n \n # Iterates through dates to populate status and severity data dictionaries\n if (queue):\n earliest = queue[0][0]\n for date in get_historical_dates(earliest, self.extraction_day, False):\n # Pops items off queue until queue is empty or date limit is reached\n while(queue and queue[0][0].date() <= date):\n curr, proj, key, status, priority = heapq.heappop(queue)\n \n # Maps the key's current parameters, overwriting previous mapping\n current_state[key] = { }\n for field, value in zip(fields, [proj, curr, status, priority]):\n current_state[key][field] = value\n \n # Sets severity and status metric data at the given date\n severity_data[date] = self._get_severity_data(current_state)\n status_data[date] = self._get_status_data(current_state)\n \n 
# Gets age data separately from status and severity\n age_map = self._get_average_age_data(data)\n \n return severity_data, status_data, age_map", "def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)", "def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))", "def accumulateSubgridMassHistory(self,q):\n pass", "def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )", "def compute_metrics(self, results: list) -> dict:", "def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def 
compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]", "def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted", "def calculate_dataset_metrics(self):\n pass", "def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg", "def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))", "def compute_statistics(self):", "def sm_measure_current(self,num_readings=1):\n self.sm.set_measurement_function(\"CURRENT\")\n self.sm.format_readings(\"CURRENT\")\n ret = 
average(self.sm.take_measurement(num_readings))\n self.sm_restore_display\n return ret", "def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")", "def metrics_group():", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)", "def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)", "def __calculate_agg_shap_scores(self):\n self.agg_stats_timer = SimbaTimer(start=True)\n for clf_state, clf_state_name in zip(range(2), [\"ABSENT\", \"PRESENT\"]):\n self.results = {}\n self.df_save_path = os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, clf_state_name, self.datetime\n ),\n )\n shap_clf_sliced = self.shap_df[\n self.shap_df[self.classifier_name] == clf_state\n ]\n for feature_category, feature_time_bin in itertools.product(\n self.unique_feature_category_names, self.unique_time_bin_names\n ):\n if feature_category not in self.results.keys():\n self.results[feature_category] = {}\n feature_names_sliced = list(\n self.feature_categories_df.loc[\n :, (feature_category, feature_time_bin)\n ]\n )\n feature_names_sliced = [\n x\n for x in feature_names_sliced\n if str(x) != \"nan\" and x in shap_clf_sliced\n ]\n self.results[feature_category][feature_time_bin] = round(\n shap_clf_sliced[feature_names_sliced].sum(axis=1).mean() * 100, 6\n )\n self.__save_aggregate_scores()\n self.agg_stats_timer.stop_timer()\n self.visualization_timer = SimbaTimer(start=True)\n\n stdout_success(\n msg=f\"Aggregate SHAP statistics saved in {self.shap_logs_path} directory\",\n elapsed_time=self.agg_stats_timer.elapsed_time_str,\n )", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in 
database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def get_measurements(self):\n metrics = {}\n for key in self.fields.keys():\n metrics[key] = []\n # What's in output:\n # proc_pid date virt res shrd cpu mem power gpus_power\n while not self.queue.empty():\n data = self.queue.get().strip().split()\n for field in self.fields:\n tp = self.fields[field]['type']\n idx = self.fields[field]['index']\n count = self.fields[field]['count']\n if count == -1:\n metrics[field].append(ResourceMonitor.str_to_type(data[idx], tp))\n elif count == 0:\n metrics[field].append([ResourceMonitor.str_to_type(data[idx], tp)])\n else:\n metrics[field].append([\n ResourceMonitor.str_to_type(data[index], tp) for index in xrange(idx, idx+count)\n ])\n return metrics", "def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)", "def get_job_metrics_summary_for_task(query):\n metric_list = ['hs06sec', 'gco2_global']\n metrics = {}\n for m in metric_list:\n metrics[m] = {'finished': 0, 'failed': 0, 'total': 0}\n\n hquery = copy.deepcopy(query)\n hquery['jobstatus__in'] = ('finished', 'failed')\n\n if 'jeditaskid' in hquery:\n\n hs06sec_sum = []\n # getting jobs. 
Can not use the .annotate() as there can be duplicates\n jobs = []\n jvalues = ['pandaid', 'jobstatus', ] + metric_list\n jobs.extend(Jobsarchived4.objects.filter(**hquery).values(*jvalues))\n jobs.extend(Jobsarchived.objects.filter(**hquery).values(*jvalues))\n jobs = drop_duplicates(jobs)\n\n for job in jobs:\n for m in metric_list:\n metrics[m]['total'] += job[m] if m in job and job[m] is not None else 0\n if job['jobstatus'] == 'finished':\n metrics[m]['finished'] += job[m] if m in job and job[m] is not None else 0\n elif job['jobstatus'] == 'failed':\n metrics[m]['failed'] += job[m] if m in job and job[m] is not None else 0\n\n # getting data from ATLARC DB, only hs06s\n pj_models = get_pandajob_arch_models_by_year(query['modificationtime__castdate__range'])\n if len(pj_models) > 0:\n for pjm in pj_models:\n try:\n hs06sec_sum.extend(pjm.objects.filter(**hquery).values('jobstatus').annotate(hs06secsum=Sum('hs06sec')))\n except Exception as ex:\n _logger.exception('Failed to get hs06sec from {} at ATLARC DB:\\n{}'.format(pjm, ex))\n\n if len(hs06sec_sum) > 0:\n for hs in hs06sec_sum:\n metrics['hs06sec']['total'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n if hs['jobstatus'] == 'finished':\n metrics['hs06sec']['finished'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n elif hs['jobstatus'] == 'failed':\n metrics['hs06sec']['failed'] += hs['hs06secsum'] if hs['hs06secsum'] is not None else 0\n\n\n return metrics", "def process(self,timeStamp,values,queueNo):\n\t\tdatain = values[0]\n\t\tcurValue = float(datain)\n\t\tif self.isBaselineRunning:\n\t\t\tself.testMinMax(curValue)\n\t\t\n\t\tif self.hasBaselineEnded:\n\t\t\tscaledValue = self.scale(curValue)\n\t\t\t#print scaledValue\n\t\t\tself.addProcessedValues(scaledValue)", "def evaluate(self, data_stream):\n self.initialize_aggregators()\n if self._accumulate_fun is not None:\n for batch in data_stream.get_epoch_iterator(as_dict=True):\n self.process_batch(batch)\n else:\n logger.debug(\n 'Only data independent variables were given,'\n 'will not iterate the over data!')\n\n return self.get_aggregated_values()", "def processStats(self):\n return self._processes.itervalues()", "def test_get_derived_metric_history(self):\n pass", "def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)", "def apply(self):\n counter = {}\n for act in self.activities:\n freq = []\n for trace in self.log:\n freq.append(len(self.project_trace(trace, [act])))\n if not len(freq) == 0:\n counter[act] = {'sum': sum(freq), 'min': min(freq),\n 'max': max(freq)}\n return counter", "def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = 
sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def calculate_times(log):\n log['processing_time'] = 0\n log['multitasking'] = 0\n log = log.to_dict('records')\n log = sorted(log, key=lambda x: (x['source'], x['caseid']))\n for _, group in itertools.groupby(log, key=lambda x: (x['source'], x['caseid'])):\n events = list(group)\n events = sorted(events, key=itemgetter('start_timestamp'))\n for i in range(0, len(events)):\n # In one-timestamp approach the first activity of the trace\n # is taken as instantsince there is no previous timestamp\n # to find a range\n dur = (events[i]['end_timestamp'] -\n events[i]['start_timestamp']).total_seconds()\n if i == 0:\n wit = 0\n else:\n wit = (events[i]['start_timestamp'] -\n events[i-1]['end_timestamp']).total_seconds()\n events[i]['waiting_time'] = wit if wit >= 0 else 0\n events[i]['processing_time'] = dur\n return pd.DataFrame.from_dict(log)", "def compute(self) -> Dict[str, np.ndarray]:\n return {name: self.metrics[name].compute() for name in self.metrics}", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"ADR trackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),10)\n\n q[('mt',ci)] -= self.subgridTmp[ci]", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def metrics(self):\n raise NotImplementedError(\"metrics\")", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(1,self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"NS_ASGS 
trackSubScales accumulating delta u^n ci=%s .abs.max= %s dm.max=%s \" % (ci,max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n\n q[('mt',ci)] -= self.subgridTmp[ci]", "def _compute_global_stats():\n global_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Check how many HITs have been completed. We now consider a HIT to be\n # completed once it has been annotated by one or more annotators.\n #\n # Before we required `hit.users.count() >= 3` for greater overlap.\n hits_completed = HIT.objects.filter(mturk_only=False, completed=True).count()\n \n # Check any remaining active HITs which are not yet marked complete.\n for hit in HIT.objects.filter(active=True, mturk_only=False, completed=False):\n if hit.users.count() >= 1:\n hits_completed = hits_completed + 1\n hit.completed = True\n hit.save()\n \n # Compute remaining HITs for all language pairs.\n hits_remaining = HIT.compute_remaining_hits()\n \n # Compute number of results contributed so far.\n ranking_results = RankingResult.objects.filter(\n item__hit__completed=True, item__hit__mturk_only=False)\n \n from math import factorial\n system_comparisons = 0\n for result in ranking_results:\n result.reload_dynamic_fields()\n # TODO: this implicitly counts A=B comparisons for multi systems.\n # Basically, inflating the number of pairwise comparisons... Fix!\n combinations = factorial(result.systems)/(factorial(result.systems-2) * 2) if result.systems > 2 else 0\n system_comparisons = system_comparisons + combinations\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # Compute average/total duration over all results.\n durations = RankingResult.objects.all().values_list('duration', flat=True)\n total_time = sum([datetime_to_seconds(x) for x in durations])\n avg_time = total_time / float(hits_completed or 1)\n avg_user_time = total_time / float(3 * hits_completed or 1)\n \n global_stats.append(('Users', len(wmt16_users)))\n global_stats.append(('Groups', len(groups)))\n global_stats.append(('HITs completed', '{0:,}'.format(hits_completed)))\n global_stats.append(('HITs remaining', '{0:,}'.format(hits_remaining)))\n global_stats.append(('Ranking results', '{0:,}'.format(ranking_results.count())))\n global_stats.append(('System comparisons', '{0:,}'.format(system_comparisons)))\n global_stats.append(('Average duration (per HIT)', seconds_to_timedelta(avg_time)))\n global_stats.append(('Average duration (per task)', seconds_to_timedelta(avg_user_time)))\n global_stats.append(('Total duration', seconds_to_timedelta(total_time)))\n \n # Create new status data snapshot\n TimedKeyValueData.update_status_if_changed('users', str(len(wmt16_users)))\n TimedKeyValueData.update_status_if_changed('groups', str(len(groups)))\n TimedKeyValueData.update_status_if_changed('hits_completed', str(hits_completed))\n TimedKeyValueData.update_status_if_changed('hits_remaining', str(hits_remaining))\n TimedKeyValueData.update_status_if_changed('ranking_results', str(ranking_results.count()))\n TimedKeyValueData.update_status_if_changed('system_comparisons', str(system_comparisons))\n TimedKeyValueData.update_status_if_changed('duration_per_hit', str(seconds_to_timedelta(avg_time)))\n TimedKeyValueData.update_status_if_changed('duration_per_task', str(seconds_to_timedelta(avg_user_time)))\n 
TimedKeyValueData.update_status_if_changed('duration_total', str(seconds_to_timedelta(total_time)))\n \n return global_stats", "def compute(self) -> Tuple[float, float, float]:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n self.statistics = {\n k: xm.mesh_reduce(k, v, np.sum) for k, v in self.statistics.items()\n }\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[int] = all_gather(self.statistics[key])\n value: int = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def calculate_metrics(jobs, metrics_names):\n metrics_def_dict = {mn: {'metric': mn.split('_')[0], 'agg': mn.split('_')[1], 'data': [], 'value': -1} for mn in metrics_names}\n\n for job in jobs:\n if job['category'] == 'run' and job['jobstatus'] == 'finished':\n for mn, mdata in metrics_def_dict.items():\n if 'per' in mdata['metric']:\n if mdata['metric'].split('per')[0] in job and mdata['metric'].split('per')[1] in job and job[mdata['metric'].split('per')[1]] > 0:\n mdata['data'].append(job[mdata['metric'].split('per')[0]]/(1.0*job[mdata['metric'].split('per')[1]]))\n elif mdata['metric'] in job and job[mdata['metric']]:\n mdata['data'].append(job[mdata['metric']])\n\n for mn, mdata in metrics_def_dict.items():\n if 'avg' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])/(1.0*len(mdata['data'])) if len(mdata['data']) > 0 else -1\n if 'sum' in mdata['agg']:\n mdata['value'] = sum(mdata['data'])\n\n metrics = {}\n for mn, mdata in metrics_def_dict.items():\n if mdata['value'] > 0:\n if 'percent' in mdata['agg']:\n metrics[mn] = round(mdata['value'] * 100.0, 2)\n else:\n metrics[mn] = round(mdata['value'], 2)\n\n return metrics", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n #would be nice to have dt^{n+1} alone\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#figure this out\n #mwf debug\n logEvent(\"HaukeSangalliTrackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n #mwf should be\n q[('mt',ci)] -= self.subgridTmp[ci]\n #don't think this matters right now because called after calculateSubgridError\n self.subgridTmp_ip[ci][:] = self.subgridError_ip_last[ci]\n self.subgridTmp_ip[ci] *= dtInv\n self.subgridTmp_ip[ci] *= self.subgridErrorMassCoef_ip_last[ci]#figure this out\n self.cip[('mt',ci)] -= self.subgridTmp_ip[ci]", "def calculate_metrics(metrics_data: List[Tuple[Metric, DataType]]) -> List[float]:\n pass", "def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])", "def _get_eval_metric(self):\n raise NotImplementedError", "def _getMetrics(self):\n metric = None\n if self.metrics is not None:\n metric = 
self.metrics(self._currentRecordIndex+1)\n elif self.metricValue is not None:\n metric = self.metricValue\n else:\n raise RuntimeError('No metrics or metric value specified for dummy model')\n\n return {self._optimizeKeyPattern:metric}", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def _summarize_results(df, metrics):\n def return_cm(x):\n if isinstance(x, int):\n return (0, 0, 0)\n\n elif len(x) > 3:\n return x[1:]\n\n return x\n\n def get_status(x):\n return {\n \"OK\": 0,\n \"ERROR\": 1\n }[x]\n\n df['status'] = df['status'].apply(get_status)\n df['confusion_matrix'] = df['confusion_matrix'].apply(ast.literal_eval)\n df['confusion_matrix'] = df['confusion_matrix'].apply(return_cm)\n df[['fp', 'fn', 'tp']] = pd.DataFrame(df['confusion_matrix'].tolist(), index=df.index)\n\n # calculate f1 score\n df_ = df.groupby(['dataset', 'pipeline'])[['fp', 'fn', 'tp']].sum().reset_index()\n\n precision = df_['tp'] / (df_['tp'] + df_['fp'])\n recall = df_['tp'] / (df_['tp'] + df_['fn'])\n df_['f1'] = 2 * (precision * recall) / (precision + recall)\n\n result = dict()\n\n # number of wins over ARIMA\n arima_pipeline = 'arima'\n intermediate = df_.set_index(['pipeline', 'dataset'])['f1'].unstack().T\n arima = intermediate.pop(arima_pipeline)\n\n result['# Wins'] = (intermediate.T > arima).sum(axis=1)\n result['# Wins'][arima_pipeline] = None\n\n # number of anomalies detected\n result['# Anomalies'] = df_.groupby('pipeline')[['tp', 'fp']].sum().sum(axis=1).to_dict()\n\n # average f1 score\n result['Average F1 Score'] = df_.groupby('pipeline')['f1'].mean().to_dict()\n\n # failure rate\n result['Failure Rate'] = df.groupby(\n ['dataset', 'pipeline'])['status'].mean().unstack('pipeline').T.mean(axis=1)\n\n result = pd.DataFrame(result)\n result.index.name = 'pipeline'\n result.reset_index(inplace=True)\n\n rank = 'Average F1 Score'\n result = _sort_leaderboard(result, rank, metrics)\n result = result.drop('rank', axis=1).set_index('pipeline')\n\n return result", "def get_job_tick_stats(self, job_origin_id):", "def compute(self) -> Tuple[float, float, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[float] = all_gather(self.statistics[key])\n value: float = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def calculate(cls, data_hist, params):\n raise NotImplementedError(\"This is an abstract function that needs to be implemented for each value function\")", "def _eval_graph(self, context, sampled_rate=None, cached_id=0):\n results, names = context.run_eval_graph(sampled_rate, cached_id)\n metric = np.mean(results[list(names).index(self.metric_name)])\n return metric", "def collect_metrics(params, start, 
uuid=str(uuid4())):\n list_of_metrics = ['AWS/EC2/CPUUtilization',\n 'CGCloud/MemUsage',\n 'CGCloud/DiskUsage_mnt_ephemeral',\n 'CGCloud/DiskUsage_root',\n 'AWS/EC2/NetworkIn',\n 'AWS/EC2/NetworkOut',\n 'AWS/EC2/DiskWriteOps',\n 'AWS/EC2/DiskReadOps']\n\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n\n while ids:\n # metrics = {metric: [] for metric in list_of_metrics}\n for instance_id in ids:\n for metric in list_of_metrics:\n averages = []\n try:\n s = start\n while s < stop:\n e = s + (4 * 24 * 3600)\n aws_start = datetime.utcfromtimestamp(s)\n aws_stop = datetime.utcfromtimestamp(e)\n met_object = get_metric(metric, instance_id, aws_start, aws_stop)\n averages.extend([x['Average'] for x in get_datapoints(met_object)])\n s = e\n if averages:\n metrics[metric].append(averages)\n logging.info('# of Datapoints for metric {} is {}'.format(metric, len(metrics[metric][0])))\n except RuntimeError:\n if instance_id in instance_ids:\n instance_ids.remove(instance_id)\n # Remove metrics if no datapoints were collected\n metrics = dict((k, v) for k, v in metrics.iteritems() if v)\n # Save CSV of data\n mkdir_p('{}_{}'.format(uuid, str(datetime.utcnow()).split()[0]))\n for metric in metrics:\n with open('{}_{}/{}.csv'.format(uuid, str(datetime.utcnow()).split()[0], metric.rsplit('/', 1)[1]), 'wb') as f:\n writer = csv.writer(f)\n writer.writerows(metrics[metric])", "def get_accumulated_data(self, topic, start_time, end_time, units):\n ignore_start_time = self._get_value('ignore_start_time', topic)\n ignore_end_time = self._get_value('ignore_end_time', topic)\n adjust_start_time = self._get_value('adjust_start_time', topic)\n adjust_end_time = self._get_value('adjust_end_time', topic)\n\n if ignore_start_time:\n self.logger.debug(\"Service ignoring start time.\")\n start_ts = self.peek_datetime(topic) - adjust_start_time\n else:\n start_ts = start_time - adjust_start_time\n\n if ignore_end_time:\n self.logger.debug(\"Service ignoring end time.\")\n end_ts = self.peek_last_datetime(topic) + adjust_end_time\n else:\n end_ts = end_time + adjust_end_time\n\n self.logger.debug(\"Service processing interval: %f %f\" %(start_ts, end_ts))\n accumulator = weewx.accum.Accum(weeutil.weeutil.TimeSpan(start_ts, end_ts))\n\n for data in self.get_data(topic, end_ts):\n if data:\n try:\n self.logger.debug(\"Service data to accumulate: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(data['dateTime']), to_sorted_string(data)))\n accumulator.addRecord(data)\n except weewx.accum.OutOfSpan:\n self.logger.info(\"Service ignoring record outside of interval %f %f %f %s\"\n %(start_ts, end_ts, data['dateTime'], (to_sorted_string(data))))\n else:\n break\n\n target_data = {}\n if not accumulator.isEmpty:\n aggregate_data = accumulator.getRecord()\n self.logger.debug(\"Service data prior to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(aggregate_data['dateTime']), to_sorted_string(aggregate_data)))\n target_data = weewx.units.to_std_system(aggregate_data, units)\n self.logger.debug(\"Service data after to conversion is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(target_data['dateTime']), to_sorted_string(target_data)))\n else:\n self.logger.debug(\"Dervice queue was empty\")\n\n # Force dateTime to packet's datetime so that the packet datetime is not updated to the MQTT datetime\n if ignore_end_time:\n target_data['dateTime'] = end_time\n\n return target_data", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric separately\n 
separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())", "def metrics(env):\n envs = environments()\n check_env(env, envs)\n\n metrics = get_or_abort(puppetdb._query, 'mbean')\n return render_template('metrics.html',\n metrics=sorted(metrics.keys()),\n envs=envs,\n current_env=env)", "def process_and_write_aggregate_results(\n aggregate_metrics: List[Dict],\n aggregate_stats: List[Dict],\n configuration: Dict,\n args: argparse.Namespace,\n dataset_id: str,\n) -> None:\n (\n averaged_metrics,\n averaged_stats,\n ) = fanatic.metrics.average_metrics_stats_from_seed_runs(aggregate_metrics, aggregate_stats)\n\n fanatic.output.save_averaged_results(averaged_metrics, averaged_stats, configuration, args, dataset_id)\n\n final_metric = averaged_metrics[\"ami\"][\"mean\"]\n logger.info(f\"For dataset_id={dataset_id} final averaged ami metric={final_metric}\")", "def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )", "def get_state(self, duration):\n metrics = []\n\n if duration:\n for count_key in self.kv_counts:\n metrics.append(\n MetricObject(\n count_key,\n self.kv_counts[count_key] / duration\n )\n )\n\n for time_key in self.kv_times:\n values = self.kv_times[time_key]['values']\n unit = self.kv_times[time_key]['unit']\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'mean']),\n stats_helper.find_mean(values),\n unit\n )\n )\n\n metrics.append(\n MetricObject(\n '.'.join([time_key, 'median']),\n stats_helper.find_median(values),\n unit\n )\n )\n\n for pct in 
self.percentiles:\n metrics.append(\n MetricObject(\n '.'.join([time_key, \"%sth_percentile\" % pct]),\n stats_helper.find_percentile(values, int(pct)),\n unit\n )\n )\n\n return metrics", "def compute_huawei_4g_value_counts(self):\n\n tech = '4G'\n\n # List of parameters to ignore\n ignore_list = ['LOADID', 'VARDATE', 'DATETIME', 'REGION', 'NENAME', 'CELLID', 'ID', 'FILENAME', 'TECHNOLOGY', 'VENDOR', 'VERSION', 'NETYPE', 'CELLNAME']\n\n self.logger.info(\"Processing Huawei baseline for {}...\".format(tech))\n\n # Get list of mos configured in process_config\n result = self.engine.execute(text(\"SELECT * FROM baseline.process_config WHERE process = true AND technology = :tech AND vendor = :vendor\"), tech=tech, vendor='HUAWEI')\n for row in result:\n vendor = row['vendor']\n technology = row['technology']\n mo = row['mo']\n\n self.logger.info(\"vendor:{}, technology:{}, mo:{}\".format(vendor, technology, mo))\n\n # Get field names from information_schema\n field_qry = \"\"\"\n SELECT t1.column_name as field \n FROM\n information_schema.columns t1\n LEFT JOIN baseline.parameter_ignore_list t2 \n ON t1.table_name = t2.mo\n AND t1.column_name = t2.parameter\n WHERE \n table_schema = 'huawei_cm'\n AND table_name = :mo\n AND t2.parameter is NULL\n AND UPPER(t1.column_name) NOT IN ('{}')\n \"\"\".format(\"','\".join(ignore_list))\n\n field_result = self.engine.execute(text(field_qry), mo=mo)\n\n # self.logger.info([row[0] for row in field_result])\n\n # self.logger.info(field_qry)\n\n for f in field_result:\n parameter = f[0]\n\n self.logger.info(\"Processing baseline for {}.{}...\".format(mo, parameter))\n\n value_qry = \"\"\"\n INSERT INTO baseline.parameter_value_counts\n (date_time, vendor, nename, mo, parameter, pvalue, occurence)\n SELECT \n MAX(t1.\"DATETIME\") AS date_time,\n 'HUAWEI' as vendor,\n t4.\"TAC\" AS nename,\n '{0}' AS mo,\n '{1}' AS parameter,\n t1.\"{1}\" AS pvalue,\n COUNT(t1.\"{1}\") AS occurence\n FROM huawei_cm.\"{0}\" t1\n INNER JOIN cm_loads t5 on t5.pk = t1.\"LOADID\"\n INNER JOIN huawei_cm.\"CELL\" t2\n ON t2.\"CELLID\" = t1.\"LOCALCELLID\"\n AND t2.\"LOADID\" = t1.\"LOADID\"\n INNER JOIN huawei_cm.\"ENODEBFUNCTION\" t3 \n ON t3.\"ENODEBFUNCTIONNAME\" = t2.\"ENODEBFUNCTIONNAME\"\n AND t3.\"LOADID\" = t1.\"LOADID\"\n INNER JOIN huawei_cm.\"CNOPERATORTA\" t4 \n ON t4.\"ENODEBFUNCTIONNAME\" = t3.\"ENODEBFUNCTIONNAME\"\n AND t4.\"LOADID\" = t1.\"LOADID\"\n\n WHERE \n t1.\"{1}\" IS NOT NULL\n AND t5.is_current_load = true\n GROUP BY t4.\"TAC\", t1.\"{1}\"\n ON CONFLICT ON CONSTRAINT uq_parameter_value_counts\n DO NOTHING\n \"\"\".format(mo, parameter)\n\n # self.logger.info(value_qry)\n try:\n self.engine.execute(text(value_qry))\n except Exception as e:\n self.logger.error(str(e))", "def get_progress_dict(self, *, global_step: int):\n res = dict(global_step=f\"{global_step :8,}\")\n\n for k in self.progress_indicators:\n if k in self.queues:\n if len(self.queues[k]) == 0:\n continue\n v = np.mean(self.queues[k])\n elif k in self.histograms:\n if len(self.histograms[k]) == 0:\n continue\n v = np.mean(self.histograms[k])\n else:\n if len(self.scalars[k]) == 0:\n continue\n v = np.mean(self.scalars[k])\n\n res[k] = f\"{v :8,.2f}\"\n\n return res", "def co_average_metrics_fx():\r\n co_inv_overall_28d_list = []\r\n co_inv_50k_28d_list = []\r\n co_inv_100k_28d_list = []\r\n co_inv_manager_28d_list = []\r\n co_inv_sales_28d_list = []\r\n co_inv_key_roles_28d_list = []\r\n co_inv_it_28d_list = []\r\n co_inv_hourly_28d_list = []\r\n\r\n co_fut_cost_overall_28d_list = []\r\n 
co_fut_cost_50k_28d_list = []\r\n co_fut_cost_100k_28d_list = []\r\n co_fut_cost_manager_28d_list = []\r\n co_fut_cost_sales_28d_list = []\r\n co_fut_cost_key_roles_28d_list = []\r\n co_fut_cost_it_28d_list = []\r\n co_fut_cost_hourly_28d_list = []\r\n\r\n for x in range(len(start_list)):\r\n a = start_list[x]\r\n b = end_list[x]\r\n\r\n co_inv_overall_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_overall')].rolling(window=28).mean()\r\n co_inv_50k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_50k')].rolling(window=28).mean()\r\n co_inv_100k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_100k')].rolling(window=28).mean()\r\n co_inv_manager_28d_values = df_co_metrics.iloc[a:b,df_co_metrics.columns.get_loc('co_inv_manager')].rolling(window=28).mean()\r\n co_inv_sales_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_sales')].rolling(window=28).mean()\r\n co_inv_key_roles_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_key_roles')].rolling(window=28).mean()\r\n co_inv_it_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_it')].rolling(window=28).mean()\r\n co_inv_hourly_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_hourly')].rolling(window=28).mean()\r\n\r\n co_fut_cost_overall_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_overall')].rolling(window=28).mean()\r\n co_fut_cost_50k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_50k')].rolling(window=28).mean()\r\n co_fut_cost_100k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_100k')].rolling(window=28).mean()\r\n co_fut_cost_manager_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_manager')].rolling(window=28).mean()\r\n co_fut_cost_sales_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_sales')].rolling(window=28).mean()\r\n co_fut_cost_key_roles_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_key_roles')].rolling(window=28).mean()\r\n co_fut_cost_it_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_it')].rolling(window=28).mean()\r\n co_fut_cost_hourly_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_hourly')].rolling(window=28).mean()\r\n\r\n co_inv_overall_28d_list.append(co_inv_overall_28d_values)\r\n co_inv_50k_28d_list.append(co_inv_50k_28d_values)\r\n co_inv_100k_28d_list.append(co_inv_100k_28d_values)\r\n co_inv_manager_28d_list.append(co_inv_manager_28d_values)\r\n co_inv_sales_28d_list.append(co_inv_sales_28d_values)\r\n co_inv_key_roles_28d_list.append(co_inv_key_roles_28d_values)\r\n co_inv_it_28d_list.append(co_inv_it_28d_values)\r\n co_inv_hourly_28d_list.append(co_inv_hourly_28d_values)\r\n\r\n co_fut_cost_overall_28d_list.append(co_fut_cost_overall_28d_values)\r\n co_fut_cost_50k_28d_list.append(co_fut_cost_50k_28d_values)\r\n co_fut_cost_100k_28d_list.append(co_fut_cost_100k_28d_values)\r\n co_fut_cost_manager_28d_list.append(co_fut_cost_manager_28d_values)\r\n co_fut_cost_sales_28d_list.append(co_fut_cost_sales_28d_values)\r\n co_fut_cost_key_roles_28d_list.append(co_fut_cost_key_roles_28d_values)\r\n co_fut_cost_it_28d_list.append(co_fut_cost_it_28d_values)\r\n co_fut_cost_hourly_28d_list.append(co_fut_cost_hourly_28d_values)\r\n\r\n print('avaerages calculated...') \r\n 
df_co_metrics['co_inv_overall_28d'] = pd.concat(co_inv_overall_28d_list)\r\n df_co_metrics['co_inv_50k_28d'] = pd.concat(co_inv_50k_28d_list)\r\n df_co_metrics['co_inv_100k_28d'] = pd.concat(co_inv_100k_28d_list)\r\n df_co_metrics['co_inv_manager_28d'] = pd.concat(co_inv_manager_28d_list)\r\n df_co_metrics['co_inv_sales_28d'] = pd.concat(co_inv_sales_28d_list)\r\n df_co_metrics['co_inv_key_roles_28d'] = pd.concat(co_inv_key_roles_28d_list)\r\n df_co_metrics['co_inv_it_28d'] = pd.concat(co_inv_it_28d_list)\r\n df_co_metrics['co_inv_hourly_28d'] = pd.concat(co_inv_hourly_28d_list)\r\n\r\n df_co_metrics['co_fut_cost_overall_28d'] = pd.concat(co_fut_cost_overall_28d_list)\r\n df_co_metrics['co_fut_cost_50k_28d'] = pd.concat(co_fut_cost_50k_28d_list)\r\n df_co_metrics['co_fut_cost_100k_28d'] = pd.concat(co_fut_cost_100k_28d_list)\r\n df_co_metrics['co_fut_cost_manager_28d'] = pd.concat(co_fut_cost_manager_28d_list)\r\n df_co_metrics['co_fut_cost_sales_28d'] = pd.concat(co_fut_cost_sales_28d_list)\r\n df_co_metrics['co_fut_cost_key_roles_28d'] = pd.concat(co_fut_cost_key_roles_28d_list)\r\n df_co_metrics['co_fut_cost_it_28d'] = pd.concat(co_fut_cost_it_28d_list)\r\n df_co_metrics['co_fut_cost_hourly_28d'] = pd.concat(co_fut_cost_hourly_28d_list)\r\n print(columns added...)", "def compute_key_value(self) -> Dict[str, float]:\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def compute_key_value(self) -> Dict[str, float]:\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics", "def compute_metrics(self):\n overall_ret = OrderedDict()\n for ap_iou_thresh in self.ap_iou_thresh:\n ret_dict = OrderedDict()\n rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, self.gt_map_cls, ovthresh=ap_iou_thresh)\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n ret_dict[\"%s Average Precision\" % (clsname)] = ap[key]\n ap_vals = np.array(list(ap.values()), dtype=np.float32)\n ap_vals[np.isnan(ap_vals)] = 0\n ret_dict[\"mAP\"] = ap_vals.mean()\n rec_list = []\n for key in sorted(ap.keys()):\n clsname = self.class2type_map[key] if self.class2type_map else str(key)\n try:\n ret_dict[\"%s Recall\" % (clsname)] = rec[key][-1]\n rec_list.append(rec[key][-1])\n except:\n ret_dict[\"%s Recall\" % (clsname)] = 0\n rec_list.append(0)\n ret_dict[\"AR\"] = np.mean(rec_list)\n overall_ret[ap_iou_thresh] = ret_dict\n return overall_ret", "def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)", "def aggregate_gis_historical_data():\n \n logging.info(\"Processing historical weather data aggregation.\")\n \n # Initialising 
function variables\n config_data = get_config()\n \n # Initialise pandas dataframe column name for baseline reference\n # and historical data.\n hist_file_path = get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_historical_file_name\"])\n\n # Define group by columns.\n group_by_cols = [\"Location\", \"Month\"]\n\n # Define aggregate columns.\n aggregate_cols = {\"Temperature_Min\": \"mean\"\n ,\"Temperature_Max\": \"mean\"\n ,\"Humidity\": [\"min\", \"max\"]\n ,\"Pressure\": [\"min\", \"max\"]}\n\n logging.info(\"Reading historical weather data.\")\n \n # Read baseline historical data.\n df = pd.read_csv(hist_file_path)\n \n logging.info(\"Completed reading historical weather data.\")\n \n logging.info(\"Aggregating historical weather data.\")\n df_aggregate = df.groupby(group_by_cols, as_index=False).aggregate(aggregate_cols)\n df_aggregate.columns = [\"\".join(name) for name in df_aggregate.columns.ravel()]\n df_aggregate.rename(columns={\"Temperature_Minmean\": \"T_avg_min\"\n ,\"Temperature_Maxmean\": \"T_avg_max\"\n ,\"Humiditymin\": \"H_min\"\n ,\"Humiditymax\": \"H_max\"\n ,\"Pressuremin\": \"P_min\"\n ,\"Pressuremax\": \"P_max\"}\n ,inplace=True)\n df_aggregate [\"T_avg_range\"] = df_aggregate [\"T_avg_max\"] - df_aggregate [\"T_avg_min\"]\n df_aggregate [\"H_range\"] = df_aggregate [\"H_max\"] - df_aggregate [\"H_min\"]\n df_aggregate [\"P_range\"] = df_aggregate [\"P_max\"] - df_aggregate [\"P_min\"]\n\n logging.info(\"Saving baseline aggregate data.\")\n df_aggregate.to_csv(get_file_path(folder_name=\"data\"\n ,subdirectory=config_data[\"gis\"][\"output_subdirectory\"]\n ,file_name=config_data[\"gis\"][\"output_base_aggregate_file_name\"])\n ,index=False)\n logging.info(\"Completed saving baseline aggregate data.\")", "def get_metadata(self, db_running_task: SchedulerTask, task_logger: Logger):\n cmd = [f\"qstat -f -F json -x {db_running_task.job_id}\"]\n\n out, err = self._run_command_and_wait(cmd, shell=True)\n # remove values that contains backslash\n # several GPU-related variables contains only a single backslash and nothing else, which will cause loads() to crash\n out = out.replace(\"\\\\\", \"\")\n json_dict = json.loads(out, strict=False)\n\n if \"Jobs\" not in json_dict:\n # a special case when a job is cancelled before getting logged in the scheduler\n task_logger.warning(\n \"job data cannot be retrieved from qstat.\"\n \" likely the job is cancelled before recording.\"\n \" setting job status to CANCELLED\"\n )\n submit_time, start_time, end_time = [0] * 3\n n_cores = 0.0\n run_time = 0\n status = \"CANCELLED\"\n return start_time, end_time, run_time, n_cores, status\n\n tasks_dict = json_dict[\"Jobs\"]\n assert (\n len(tasks_dict.keys()) == 1\n ), f\"Too many tasks returned by qstat: {tasks_dict.keys()}\"\n\n task_name = list(tasks_dict.keys())[0]\n task_dict = tasks_dict[task_name]\n submit_time = task_dict[\"ctime\"].replace(\" \", \"_\")\n start_time = task_dict[\"qtime\"].replace(\" \", \"_\")\n # Last modified time. 
There isn't an explicit end time,\n # so only other option would be to add walltime to start time\n end_time = task_dict[\"mtime\"].replace(\" \", \"_\")\n # check if 'resources_used' are one of the fields\n if \"resources_used\" in task_dict.keys():\n n_cores = float(task_dict[\"resources_used\"][\"ncpus\"])\n run_time = task_dict[\"resources_used\"][\"walltime\"]\n else:\n # give a dummy data when pbs failed to return json with required field\n n_cores = 1\n run_time = \"00:00:01\"\n\n # status uses the same states as the queue monitor, rather than full words like sacct\n status = task_dict[\"job_state\"]\n\n return start_time, end_time, run_time, n_cores, status", "def _get_stats_record(proc_info: psutil.Process) -> ResourceStats:\n return ResourceStats(\n time.time(),\n proc_info.cpu_percent(),\n memory_profiler.memory_usage(proc_info.pid, max_usage=True),\n )", "def _aggregate_log_values(self, source, dest):\n remove = []\n for key, item in source.items():\n if \"data\" not in item:\n # Assume it's a sub-group\n dest[key] = {}\n self._aggregate_log_values(item, dest[key])\n else:\n aggregator = self._get_aggregator_for_key(key, item['agg'])\n value = aggregator(item['data'])\n if item['precision'] is not None:\n value = round(value, item['precision'])\n dest[key] = value\n if item['scope'] == 'get':\n remove.append(key)\n for key in remove:\n del source[key]", "def analysis_host_sec(self):\n #calc the date\n time_now = int(time.time())\n time_local = time.localtime(time_now)\n date = time.strftime(\"%Y-%m-%d\",time_local)\n sum_cpu_ratio = 0\n sum_phy_mem_size = 0\n sum_virt_mem_size = 0\n\n key_re_word = \"%s qa_work\" % self.pid\n for line in self.file_top.readlines():\n if re.search(key_re_word, line):\n #analysis_cpu_rate()\n sum_cpu_ratio += float(line.split()[8])\n self.cpu_list_1sec.append(float(line.split()[8]))\n\n #analysis_host_phy_mem_size(), the standerd unit is \"g\"\n if \"m\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"m\")) / 1000\n elif \"g\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"g\"))\n elif \"k\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"k\")) / 1000 / 1000\n else:\n phy_mem_size = 0.0\n self.phy_mem_list_1sec.append(float(phy_mem_size))\n sum_phy_mem_size += phy_mem_size\n\n #analysis_host_virt_mem_size(), the standerd unit is \"g\"\n if \"m\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"m\")) / 1000\n elif \"g\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"g\"))\n elif \"k\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"k\")) / 1000 / 1000\n else:\n vir_mem_size = 0\n self.virt_mem_list_1sec.append(float(vir_mem_size))\n sum_virt_mem_size += vir_mem_size\n\n elif re.search(\"top -\", line):\n final_time = date + \" \" + line.split()[2]\n self.top_pertime.append(final_time)\n top_num = min(len(self.top_pertime), len(self.cpu_list_1sec), len(self.phy_mem_list_1sec), len(self.virt_mem_list_1sec))\n\n #cal the average data\n average_cpu_ratio = round(sum_cpu_ratio/len(self.cpu_list_1sec), 2)\n average_phy_mem_size = round(sum_phy_mem_size/len(self.phy_mem_list_1sec), 2)\n average_virt_mem_size = round(sum_virt_mem_size/len(self.virt_mem_list_1sec), 2)\n #cal the max data\n max_cpu_ratio = max(self.cpu_list_1sec)\n max_phy_mem_size = max(self.phy_mem_list_1sec)\n max_virt_mem_size = max(self.virt_mem_list_1sec)\n #insert into mysql-top_list_1sec_avg\n print \"average_cpu_ratio: %s\" % average_cpu_ratio\n print 
\"average_phy_mem_size: %s\" % average_phy_mem_size\n print \"average_virt_mem_size: %s\" % average_virt_mem_size\n print \"max_cpu_ratio: %s\" % max_cpu_ratio\n print \"max_phy_mem_size: %s\" % max_phy_mem_size\n print \"max_virt_mem_size: %s\" % max_virt_mem_size\n if self.db_onoff == \"on\":\n self.mysql.insert_table_sql_top_avg(self.time_sql, max_cpu_ratio, max_phy_mem_size, max_virt_mem_size)", "def tradecorerunning() -> dict:\n return {\n 'metricName': 'TradeCoreRunning',\n 'resolution': 'second',\n 'fillGaps': True,\n 'defaultLookBack': 172800,\n 'archived': False,\n 'boolMetric': True\n }", "def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()", "def get_stats_summary(self):\n perf_table = spark.table(self.performance_table)\\\n .where(\"yyyy_mm_dd between '{start_date}' and '{end_date}'\"\n .format(start_date = self.start_date, end_date = self.end_date))\\\n .where(\"clicks > 0\")\\\n .where(\"commission_expected_euro <= {max_rpb}\".format(max_rpb = self.max_rpb))\n\n if self.pos == ['All']:\n perf_table = perf_table.groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n else:\n filtered_pos = spark.createDataFrame(pd.DataFrame(data = self.pos,\n columns = [\"pos\"]))\n\n perf_table = perf_table.join(filtered_pos, on = \"pos\", how = \"inner\")\\\n .groupBy(*self.agg_on)\\\n .agg(f.sum(\"nits_bookings\").alias(\"nits_bookings\")\n ,f.sum(\"commission_expected_euro\").alias(\"nits_commission\")\n ,f.sum(\"bookings\").alias(\"gross_bookings\")\n ,f.sum(\"commission_amount_euro\").alias(\"gross_commission\")\n ,f.sum(\"cost_euro\").alias(\"cost\")\n ,f.sum(\"clicks\").alias(\"clicks\")\n ,f.sum(\"roomnights\").alias(\"roomnights\"))\\\n .withColumn(\"nits_profit\",f.expr(\"nits_commission-cost\"))\\\n .withColumn(\"gross_profit\", f.expr(\"gross_commission-cost\"))\n\n return (perf_table)", "def update(self, current, values=None):\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n if k not in self._values:\n self._values[k] = [\n v * (current - self._seen_so_far),\n current - self._seen_so_far,\n ]\n else:\n self._values[k][0] += v * (current - self._seen_so_far)\n self._values[k][1] += current - self._seen_so_far\n else:\n # Stateful metrics output a numeric value. 
This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = \" - %.0fs\" % (now - self._start)\n if self.verbose == 1:\n if (\n now - self._last_update < self.interval\n and self.target is not None\n and current < self.target\n ):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n else:\n sys.stdout.write(\"\\n\")\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = (\"%\" + str(numdigits) + \"d/%d [\") % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += \"=\" * (prog_width - 1)\n if current < self.target:\n bar += \">\"\n else:\n bar += \"=\"\n bar += \".\" * (self.width - prog_width)\n bar += \"]\"\n else:\n bar = \"%7d/Unknown\" % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n if self.target is not None and current < self.target:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = \"%d:%02d:%02d\" % (\n eta // 3600,\n (eta % 3600) // 60,\n eta % 60,\n )\n elif eta > 60:\n eta_format = \"%d:%02d\" % (eta // 60, eta % 60)\n else:\n eta_format = \"%ds\" % eta\n\n info = \" - ETA: %s\" % eta_format\n else:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += \" %.0fs/%s\" % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += \" %.0fms/%s\" % (time_per_unit * 1e3, self.unit_name)\n else:\n info += \" %.0fus/%s\" % (time_per_unit * 1e6, self.unit_name)\n\n for k in self._values_order:\n info += \" - %s:\" % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n else:\n info += \" %s\" % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += \" \" * (prev_total_width - self._total_width)\n\n if self.target is not None and current >= self.target:\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is not None and current >= self.target:\n numdigits = int(np.log10(self.target)) + 1\n count = (\"%\" + str(numdigits) + \"d/%d\") % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += \" - %s:\" % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def activityProps(pdict):\n # For baseline, establish the precentage of time each cell of each type\n act = {gr: {'burst': [], 'tonic': [], 'silent': [],\n 'burstLoc': [], 'tonicLoc': []} for gr in pdict.keys()}\n \n # Populate the dict\n for group in pdict.keys():\n for cell in pdict[group]['intervals']['GapFree I=0 / Baseline recording'].keys():\n inters, timeSpent = [], []\n for clust in range(len(pdict[group]['intervals']['GapFree I=0 / Baseline recording'][cell])):\n inters.append(np.mean(pdict[group]['intervals']['GapFree I=0 / Baseline recording'][cell][clust]))\n timeSpent.append(np.mean(pdict[group]['activity']['GapFree I=0 / Baseline recording'][cell][0][clust]))\n \n # Add these 
percentages\n maxT = pdict[group]['duration']['GapFree I=0 / Baseline recording'][cell]\n if len(inters) > 1:\n time_sort =[x for (y,x) in sorted(zip(inters, timeSpent))]\n inter_sort = [i for i in sorted(inters)]\n act[group]['burst'].append(time_sort[0]/maxT)\n act[group]['tonic'].append(time_sort[1]/maxT)\n act[group]['silent'].append(1-(time_sort[0]+time_sort[1])/maxT)\n act[group]['burstLoc'].append(inter_sort[0])\n act[group]['tonicLoc'].append(inter_sort[1])\n else:\n act[group]['tonic'].append(timeSpent[0]/maxT)\n act[group]['tonicLoc'].append(inters[0])\n act[group]['silent'].append(1-(timeSpent[0]/maxT))\n \n # Each cell done\n # Group done\n # All groups done\n return act", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def compute_ericsson_2g_value_counts(self):\n\n tech = '2G'\n\n # List of parameters to ignore\n ignore_list = ['LOADID', 'VARDATE', 'DATETIME', 'REGION', 'NENAME', 'CELLID', 'ID', 'FILENAME', 'TECHNOLOGY', 'VENDOR', 'VERSION', 'NETYPE', 'CELLNAME']\n\n self.logger.info(\"Processing Huawei baseline for {}...\".format(tech))\n\n # Get list of mos configured in process_config\n result = self.engine.execute(text(\"SELECT * FROM baseline.process_config WHERE process = true AND technology = :tech AND vendor = :vendor\"), tech=tech, vendor='ERICSSON')\n for row in result:\n vendor = row['vendor']\n technology = row['technology']\n mo = row['mo']\n\n self.logger.info(\"vendor:{}, technology:{}, mo:{}\".format(vendor, technology, mo))\n\n # Get field names from information_schema\n field_qry = \"\"\"\n SELECT t1.column_name as field \n FROM\n information_schema.columns t1\n LEFT JOIN baseline.parameter_ignore_list t2 \n ON t1.table_name = t2.mo\n AND t1.column_name = t2.parameter\n WHERE \n table_schema = 'ericsson_cm'\n AND table_name = :mo\n AND t2.parameter is NULL\n AND UPPER(t1.column_name) NOT IN ('{}')\n \"\"\".format(\"','\".join(ignore_list))\n\n field_result = self.engine.execute(text(field_qry), mo=mo)\n\n # self.logger.info([row[0] for row in field_result])\n\n # self.logger.info(field_qry)\n\n self.logger.info('Processing parameters...')\n for f in field_result:\n parameter = f[0]\n\n self.logger.info(\"Processing baseline for {}.{}...\".format(mo, parameter))\n\n value_qry = \"\"\"\n INSERT INTO baseline.parameter_value_counts\n (date_time, vendor, nename, mo, parameter, pvalue, occurence)\n SELECT \n MAX(t1.\"DATETIME\") AS date_time,\n 'ERICSSON' as vendor,\n t1.\"BSC_NAME\" AS nename,\n '{0}' AS mo,\n '{1}' AS parameter,\n t1.\"{1}\" AS pvalue,\n COUNT(t1.\"{1}\") AS occurence\n FROM\n ericsson_cm.\"{0}\" t1\n INNER JOIN cm_loads t2 on t2.pk = t1.\"LOADID\"\n WHERE t2.is_current_load = true AND t1.\"{1}\" IS NOT NULL\n GROUP BY \n t1.\"BSC_NAME\", t1.\"{1}\"\n ON CONFLICT ON CONSTRAINT uq_parameter_value_counts\n DO NOTHING\n \"\"\".format(mo, parameter)\n\n self.engine.execute(text(value_qry))", "def set_metrics(self):", "def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')", "def get_metrics(self):\n return None", "def calculate(self):", "def get_metrics(self) -> Dict[str, base.Number]:\n return self._metrics", "def compute_total_times(self):\n rval = {}\n for fgraph, node in self.apply_time:\n if node not in rval:\n self.fill_node_total_time(fgraph, node, rval)\n return rval", "def process_metrics_overall(\n 
self, the_dict, names=[\"metric\", \"phase\", \"epoch\", \"performance\"]\n ):\n result = (\n pd.DataFrame(the_dict)\n .reset_index()\n .melt(id_vars=\"index\")\n .set_index([\"index\", \"variable\"])\n .value.apply(pd.Series)\n .stack()\n .reset_index()\n )\n result.columns = names\n return result", "def evaluate(\n self,\n results,\n metric=\"mAP\",\n logger=None,\n proposal_nums=(100, 300, 1000),\n iou_thr=0.75,\n scale_ranges=None,\n ):\n\n if not isinstance(metric, str):\n assert len(metric) == 1\n metric = metric[0]\n allowed_metrics = [\"mAP\", \"recall\"]\n if metric not in allowed_metrics:\n raise KeyError(f\"metric {metric} is not supported\")\n annotations = [self.get_ann_info(i) for i in range(len(self))]\n eval_results = {}\n\n if metric == \"mAP\":\n assert isinstance(iou_thr, float)\n ds_name = list(self.class_names)\n mean_ap, _ = eval_map(\n results,\n annotations,\n scale_ranges=None,\n iou_thr=iou_thr,\n dataset=ds_name,\n logger=logger,\n )\n eval_results[\"mAP\"] = mean_ap\n\n elif metric == \"recall\":\n gt_bboxes = [ann[\"bboxes\"] for ann in annotations]\n if isinstance(iou_thr, float):\n iou_thr = [iou_thr]\n recalls = eval_recalls(\n gt_bboxes, results, proposal_nums, iou_thr, logger=logger\n )\n for i, num in enumerate(proposal_nums):\n for j, iou in enumerate(iou_thr):\n eval_results[f\"recall@{num}@{iou}\"] = recalls[i, j]\n if recalls.shape[1] > 1:\n ar = recalls.mean(axis=1)\n for i, num in enumerate(proposal_nums):\n eval_results[f\"AR@{num}\"] = ar[i]\n\n return eval_results", "def update_running_totals(self) -> None:\n while True:\n try:\n results = self.queue_manager.get_results_report()\n except Empty:\n break\n if \"results\" in results and \"step_results\" in results[\"results\"]:\n self.update_running_totals_from_load_step_results(results[\"results\"])\n elif \"error\" in results:\n self.logger.warning(f\"Error in load: {results}\")\n else: # pragma: no cover\n self.logger.warning(f\"Unexpected message from subtask: {results}\")", "def calculate_metrics(self, metric_df, dose):\n # Prepare to iterate through all rois\n roi_exists = self.roi_mask.max(axis=(0, 1, 2))\n voxels_in_tenth_of_cc = np.maximum(1, np.round(100/self.voxel_size)) #\n for roi_idx, roi in enumerate(self.data_loader.full_roi_list):\n if roi_exists[roi_idx]:\n roi_mask = self.roi_mask[:, :, :, roi_idx].flatten()\n roi_dose = dose[roi_mask]\n roi_size = len(roi_dose)\n if roi in self.data_loader.rois['oars']:\n if 'D_0.1_cc' in self.oar_eval_metrics:\n # Find the fractional volume in 0.1cc to evaluate percentile\n fractional_volume_to_evaluate = 100 - voxels_in_tenth_of_cc/roi_size * 100\n metric_eval = np.percentile(roi_dose, fractional_volume_to_evaluate)\n metric_df.at[self.patient_list[0], ('D_0.1_cc', roi)] = metric_eval\n if 'mean' in self.oar_eval_metrics:\n metric_eval = roi_dose.mean()\n metric_df.at[self.patient_list[0], ('mean', roi)] = metric_eval\n elif roi in self.data_loader.rois['targets']:\n if 'D_99' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 1)\n metric_df.at[self.patient_list[0], ('D_99', roi)] = metric_eval\n if 'D_95' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 5)\n metric_df.at[self.patient_list[0], ('D_95', roi)] = metric_eval\n if 'D_1' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 99)\n metric_df.at[self.patient_list[0], ('D_1', roi)] = metric_eval\n\n return metric_df", "def __calculate_all_indicators(self,df):\n df=self.__calculate_moving_average(df)\n df=self.__calculate_tsi(df)\n 
df=self.__calculate_adx(df)\n df=self.__calculate_rsi(df)\n\n\n return df", "def generateDerivedMetrics(kernelMetrics, statistics, throughputMetrics = {}, countMetrics = {}, combinedMetrics = {}):\n\n # combine single metrics \n for combinedMetric in combinedMetrics:\n for kernel in kernelMetrics:\n logging.debug(\"Combining metrics for kernel {}\".format(kernel))\n # iterate over each run, take the number of runs to be\n # the length of the first source metric\n if combinedMetrics[combinedMetric][0] in kernelMetrics[kernel]:\n combinedMetricCounts = []\n sourceMetricMissing = False\n # go through each run\n for run in range(0, len(kernelMetrics[kernel][ combinedMetrics[combinedMetric][0] ])):\n\n combinedMetricRunCount = 0\n # take all the source metrics and add them into the\n # combined metric\n for sourceMetric in combinedMetrics[combinedMetric]:\n if sourceMetric in kernelMetrics[kernel]:\n # TODO delete once debugged print(\"runs of {} {}\".format(sourceMetric, kernelMetrics[kernel][sourceMetric]))\n combinedMetricRunCount = combinedMetricRunCount + kernelMetrics[kernel][sourceMetric][run]\n else:\n sourceMetricMissing = True\n logging.info(\"Source metric {} missing for combined metric {}, combined metric will not be\"\n \"added\".format(sourceMetric, combinedMetric))\n # append this run ot the end of the list\n combinedMetricCounts.append(combinedMetricRunCount)\n if not sourceMetricMissing:\n kernelMetrics[kernel][combinedMetric] = combinedMetricCounts\n\n # take throughputs and convert them to counts\n # doesn't use averages since that can skew results\n for throughputMetricName, countMetricName in zip(throughputMetrics, countMetrics):\n for kernel in kernelMetrics:\n logging.debug(\"Generating count metrics for {} in kernel {}\".format(throughputMetricName, kernel))\n if throughputMetricName in kernelMetrics[kernel]:\n counts = []\n for run in range(0, len(kernelMetrics[kernel][throughputMetricName])):\n count = kernelMetrics[kernel][throughputMetricName][run] * kernelMetrics[kernel][\"Duration\"][run]\n counts.append(count)\n kernelMetrics[kernel][countMetricName] = counts", "def calc(self, app_id, node_id, system, control_input, environment_input, use_cache=True):\n app = system.get_app(app_id)\n dst_node = system.get_node(node_id)\n\n arrival_rate = calc_received_load(app_id, node_id, system, control_input, environment_input,\n use_cache=use_cache, per_instance=True)\n alloc_cpu = control_input.get_allocated_cpu(app.id, dst_node.id)\n service_rate = alloc_cpu / float(app.work_size)\n\n return GlobalProcessingResult(arrival_rate, service_rate)" ]
[ "0.65272045", "0.6503945", "0.6044549", "0.5962655", "0.5960727", "0.5932751", "0.58754987", "0.5782237", "0.5775919", "0.5757956", "0.57359225", "0.57347035", "0.5726887", "0.57170856", "0.57170856", "0.56863284", "0.56776404", "0.56517655", "0.56427175", "0.5625753", "0.56163687", "0.5585638", "0.5527937", "0.55202895", "0.54722095", "0.5436671", "0.543601", "0.5426373", "0.54232574", "0.53982216", "0.5369598", "0.5349854", "0.5348538", "0.5329617", "0.532101", "0.52917594", "0.5279826", "0.526879", "0.5266732", "0.52653396", "0.52626497", "0.525206", "0.5251638", "0.52511126", "0.5250086", "0.5247283", "0.5241842", "0.5231916", "0.52268434", "0.521095", "0.5205098", "0.51721245", "0.51689726", "0.51616234", "0.51478297", "0.5138128", "0.51324445", "0.51281136", "0.5126408", "0.51212364", "0.5114707", "0.5101992", "0.50978017", "0.5093912", "0.50875854", "0.50843084", "0.5062719", "0.50613195", "0.50610185", "0.50396204", "0.503597", "0.50288886", "0.50288886", "0.50168854", "0.5015404", "0.50140023", "0.5001741", "0.49930006", "0.49906644", "0.49903014", "0.4986793", "0.49820352", "0.49709257", "0.49692833", "0.49687064", "0.4965114", "0.49641752", "0.49539062", "0.49513444", "0.4948124", "0.49439964", "0.49414608", "0.4937918", "0.4936762", "0.4930108", "0.4923512", "0.49218878", "0.49214727", "0.49150655", "0.49081978" ]
0.8260474
0
Collect the per-process tracker for the monitored process(es).
def gather_sample(self):

    for _pid in self._select_processes():
        if not self.__trackers.get(_pid):
            self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)

    self._reset_absolute_metrics()

    for _tracker in self.__trackers.values():
        _metrics = _tracker.collect()
        self.record_metrics(_tracker.pid, _metrics)

    self._calculate_aggregated_metrics()
    self._remove_dead_processes()

    self.print_metrics()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )", "def monitoredProcs(self):\n return self._pidToProcess.itervalues()", "def processStats(self):\n return self._processes.itervalues()", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n 
int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def _collect_set(self, pidset):", "def GetPublishedProcesses():\r\n pass", "def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector", "def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)", "def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()", "def resource_collect(pid=None):\n try:\n import psutil\n except ImportError:\n return {}\n\n p = psutil.Process(pid or os.getpid())\n return {'cpu_percent': psutil.cpu_percent(),\n 'status': p.status(),\n 'memory_percent': p.memory_percent(),\n 'memory_info_ex': p.memory_info_ex(),\n 'disk_io_counters': metrics.disk_io_counters(),\n 'net_io_counters': metrics.net_io_counters()}", "def identify_processes(self) -> Dict[int, dict]:\n\n processes = {}\n\n for process in 
self.behavior[\"generic\"]:\n\n proc_name, proc_path = split_path(process[\"process_path\"])\n\n processes[int(process[\"pid\"])] = {\n FieldNames.PROCESS_IMAGE: proc_name,\n FieldNames.PROCESS_IMAGE_PATH: proc_path,\n FieldNames.PROCESS_ID: int(process[\"pid\"]),\n }\n\n return processes", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))", "def pids(self):\n return self._pidToProcess.iterkeys()", "def get_processes():\n yield from psutil.process_iter()", "def monitorAll(self):\n\n websites = self.user.mySites.values()\n\n # subprocesses to get the requests logs\n self.processes = [Process(target=self.monitorOne, args=(website,)) for website in websites]\n\n for process in self.processes:\n process.daemon = True\n\n for process in self.processes:\n process.start()\n\n for process in self.processes:\n process.join()\n\n return", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs", "def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. 
\"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids", "def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with 
try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def __iter__(self):\n seen = self.seen\n if time_now() - self.last_cleanup_time > self.cleanup_seen_interval:\n # Time to cleanup seen set\n to_remove = set()\n for pid in seen:\n # Remove from seen if PID no longer running\n if not P.exists(P.join(PROC_DIR, str(pid))):\n to_remove.add(pid)\n\n seen -= to_remove\n self.last_cleanup_time = time_now()\n\n for file in os.listdir(PROC_DIR):\n try:\n pid = int(file)\n if pid not in seen:\n self._new_pids.append(pid)\n\n except ValueError:\n # Non PID file in /proc\n pass\n\n seen.update(self._new_pids)\n\n return self", "def reload(self):\n\t\tdel self.processes\n\t\tself.processes = {}\n\t\tpids = os.listdir(self.basedir)\n\t\tfor spid in pids:\n\t\t\ttry:\n\t\t\t\tpid = int(spid)\n\t\t\texcept:\n\t\t\t\tcontinue\n\n\t\t\tself.processes[pid] = process(pid, self.basedir)", "def watch_process(self):\n psutil.wait_procs([psutil.Process(self._proc.pid)],\n callback=self.start)", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret", "def get_processes(self):\n processes={}\n for (server_ip, server_port) in self.hosts:\n try:\n server = xmlrpclib.ServerProxy(\"http://%s:%d\"%(server_ip, server_port))\n uid = server.get_id()\n if uid != self.uid:\n processes[uid] = server\n except socket.error:\n pass\n return processes", "def getAllProcessInfo(self):\r\n self._update('getAllProcessInfo')\r\n\r\n all_processes = self._getAllProcesses(lexical=True)\r\n\r\n output = []\r\n for group, process in all_processes:\r\n name = make_namespec(group.config.name, process.config.name)\r\n output.append(self.getProcessInfo(name))\r\n return output", "def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active", "def get_running_processes(self, dev_handler):\n # Get the list of running processes on each device\n running_processes = NvmlHandler.exec_nvml_function(nvmlDeviceGetComputeRunningProcesses,dev_handler)\n\n # Turns these process objects into dicts\n 
running_processes_dicts = [obj.__dict__ for obj in running_processes if obj]\n\n # Enhance these dicts with information from psutil\n new_dicts = []\n for running_processes_dict in running_processes_dicts:\n\n # Init the new dict with the current information\n more_ps_infos = {}\n more_ps_infos.update(running_processes_dict)\n\n # Rename the usedGpuMemory key, if any\n if 'usedGpuMemory' in more_ps_infos:\n more_ps_infos['gpu_memory_used'] = utils.psutil_parse_readable_bytes(\n more_ps_infos.get('usedGpuMemory')\n )\n del more_ps_infos['usedGpuMemory']\n\n # Try to retreive info about the process using psutil\n try:\n pid = running_processes_dict.get('pid')\n more_ps_infos.update(utils.psutil_snapshot_process(pid))\n except Exception as e:\n logger.warning('Cannot gather info from process {}'.format(pid))\n\n new_dicts.append(more_ps_infos)\n\n return new_dicts", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_threads(pid: int) -> dict:\n threads_map = defaultdict(list)\n proc = psutil.Process(pid)\n for thread in proc.threads():\n threads_map[psutil.Process(thread.id).name()].append(thread.id)\n return threads_map", "def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode (seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)", "def inspire_pidstore():", "def _storePerfStats(self, results):\n self.state = ZenProcessTask.STATE_STORE_PERF\n byConf = reverseDict(self._deviceStats._pidToProcess)\n for procStat, pids in byConf.iteritems():\n if len(pids) != 1:\n log.debug(\"There are %d pids by the name %s - %s\",\n len(pids), procStat._config.name, procStat._config.originalName)\n procName = procStat._config.name\n for pid in pids:\n if not AS400PLUG in self._device.zCollectorPlugins:\n cpu = results.get(CPU + str(pid), None)\n else:\n cpu = results.get(AS400CPU + str(pid), None) / 10 ## as we get millis vs centis\n mem = results.get(MEM + str(pid), None)\n procStat.updateCpu(pid, cpu)\n procStat.updateMemory(pid, mem)\n self._save(procName, 'cpu_cpu', procStat.getCpu(),\n 'DERIVE', min=0)\n self._save(procName, 'mem_mem',\n procStat.getMemory() * 1024, 'GAUGE')\n return results", "def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res", "def collectData(self):\n\n self.data.datahash = {} # dict of system data\n\n vmstat_dict = self._getvmstat()\n if vmstat_dict:\n self.data.datahash.update(vmstat_dict)\n\n uptime_dict = self._getuptime()\n if uptime_dict:\n self.data.datahash.update(uptime_dict)\n\n log.log( \"<system>system.collectData(): new system list created\", 7 )", "def performance_stats(self):\n current_status = psutil.STATUS_DEAD\n try:\n current_status = 
self.process.status()\n except psutil.NoSuchProcess:\n pass\n\n self.process_manager.handle_status_change(self.process_index, round(self.ioloop.time(), 2), current_status)\n\n if current_status != psutil.STATUS_DEAD:\n self.ioloop.call_later(0.5, self.performance_stats)", "def _init_local_processes_stats_publisher(self):\n stats = self._local_processes_stats\n # Init cache\n stats.cache = StatsCache(stats.cache_size)\n # Init source\n stats_source = ProcessesStatsSource()\n # Configure stats publishing\n stats.publisher = StatsPublisher(stats_source, stats.update_interval)\n stats.publisher.subscribe(stats.cache)\n self._publishers.append(stats.publisher)\n # Configure handlers\n self._routes['/stats/local/processes/cache'] = HandlerInfo(\n handler_class=CachedStatsHandler,\n init_kwargs=dict(stats_cache=stats.cache)\n )\n self._routes['/stats/local/processes/current'] = HandlerInfo(\n handler_class=CurrentStatsHandler,\n init_kwargs=dict(stats_source=stats_source)\n )", "def list_local_processes(self, process_type=''):\n if not process_type:\n return self.procs.values()\n\n return [p for p in self.procs.itervalues() if p.process_type == process_type]", "def num_processes():\n return 1", "def procinfo(self):\n\n info = {}\n info[\"pid\"] = self.pid\n info[\"exe\"] = self.exe\n info[\"procname\"] = self.procname\n\n return info", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def get_running_processes(self):\n\n all_processes = []\n for _process in self.processes:\n all_processes.append(_process[\"pid\"])\n return all_processes", "def collect_pidin(self, log_dir):\n log_type = \"pidin\"\n log_name = \"pidin.txt\"\n cmd = \"pidin > /tmp/{}\".format(log_name)\n\n self._collect_log(log_type, log_dir, log_name, cmd)", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n (FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n output[right] = process[left]\n\n return output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: 
\"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}", "def processes(self):\n return self._getint('processes')", "def procinfo(self):\n\n info = {}\n info[\"state\"] = self.state\n info[\"user\"] = self.user\n info[\"ruser\"] = self.ruser\n info[\"uid\"] = self.uid\n info[\"ruid\"] = self.ruid\n info[\"gid\"] = self.gid\n info[\"rgid\"] = self.rgid\n info[\"pid\"] = self.pid\n info[\"ppid\"] = self.ppid\n info[\"pgid\"] = self.pgid\n info[\"pri\"] = self.pri\n info[\"pcpu\"] = self.pcpu\n info[\"pmem\"] = self.pmem\n info[\"vsz\"] = self.vsz\n info[\"rss\"] = self.rss\n info[\"time\"] = self.time\n info['timesec'] = self.timeconv(self.time)\n info[\"stime\"] = self.stime\n info[\"f\"] = self.f\n info[\"tty\"] = self.tty\n info[\"nice\"] = self.nice\n info[\"wchan\"] = self.wchan\n info[\"comm\"] = self.comm\n info[\"args\"] = self.args\n info[\"procname\"] = self.procname\n\n return info", "def pids(self):\r\n return copy(self._pids)", "def dump_proc_self_maps():\n return", "def addMonitoring(process):\n import FWCore.ParameterSet.Config as cms\n \n process.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n jobReportOutputOnly = cms.untracked.bool(True)\n )\n process.Timing = cms.Service(\"Timing\",\n summaryOnly = cms.untracked.bool(True)\n )\n \n return process", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def num_processes(self):\n return 1", "def __merge_processes_data(self, manager_data, tracker=None):\n\n if manager_data is not None:\n if (\n not self.autosave.authorized\n and PyFunceble.CONFIGURATION.multiprocess_merging_mode != \"live\"\n and not 
PyFunceble.CONFIGURATION.quiet\n ):\n print(\n Fore.MAGENTA\n + Style.BRIGHT\n + \"\\nMerging cross processes data... This process may take some time.\"\n )\n\n for test_output in manager_data:\n if self.autosave.authorized:\n print(Fore.MAGENTA + Style.BRIGHT + \"Merging process data ...\")\n\n self.post_test_treatment(\n test_output,\n self.file_type,\n complements_test_started=self.complements_test_started,\n auto_continue_db=self.autocontinue,\n inactive_db=self.inactive_db,\n mining=self.mining,\n whois_db=self.whois_db,\n )\n\n if tracker:\n tracker.add_position(len(test_output[\"given\"]))\n\n manager_data[:] = []\n\n self.autocontinue.save()\n self.inactive_db.save()\n self.mining.save()\n\n self.cleanup(self.autocontinue, self.autosave, test_completed=False)", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def _collect_all(self):", "def _UpdateProcessingStatus(self, pid, process_status, used_memory):", "def _StopMonitoringProcesses(self):\n # We need to make a copy of the list of pids since we are changing\n # the dict in the loop.\n for pid in list(self._process_information_per_pid.keys()):\n self._RaiseIfNotRegistered(pid)\n process = self._processes_per_pid[pid]\n\n self._StopMonitoringProcess(process)", "def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. Just return the cached version.\r\n yield proc", "def plugin_initialize():\n global _PROC_PID_STAT\n collectd.info('Initializing collectd-mlab plugin.')\n _PROC_PID_STAT = '/proc/%s/stat' % os.getpid()", "def _StartTelemetry(self, vm):\n try:\n vm.RemoteCommand('perf list')\n except errors.VirtualMachine.RemoteCommandError as ex:\n logging.exception('Failed executing perf. 
Is it installed?')\n raise ex\n perf_collect_file = posixpath.join(self.telemetry_dir, 'perfspect',\n 'perf-collect.sh')\n vm.RemoteCommand(f'sudo chmod +x {perf_collect_file}')\n perf_dir = posixpath.join(self.telemetry_dir, 'perfspect')\n stdout, _ = vm.RemoteCommand(\n f'cd {perf_dir} && sudo ./perf-collect.sh')\n self.pid = stdout.strip()\n logging.debug('fpid of PerfSpect collector process: %s', self.pid)", "def analyse_per_priority(self):\n dm1_total = 0\n dm2_total = 0\n none_total = 0\n dm1_size = []\n dm2_size = []\n none_size = []\n users = []\n pdd = self.chipdata.get_var_strict('$_per_prio_data')\n num_entries = len(pdd.members)\n\n for current in range(0, num_entries):\n dm1_size.append(\n pdd.members[current].get_member('alloc_info_dm1')\n .get_member('alloc_size').value\n )\n dm2_size.append(\n pdd.members[current].get_member('alloc_info_dm2')\n .get_member('alloc_size').value\n )\n none_size.append(\n pdd.members[current].get_member('alloc_info_none')\n .get_member('alloc_size').value\n )\n users.append(pdd.members[current].get_member('refcount').value)\n dm1_total = dm1_total + dm1_size[current]\n dm2_total = dm2_total + dm2_size[current]\n none_total = none_total + none_size[current]\n\n total_mem = dm1_total + dm2_total + none_total\n self.formatter.output('Total scratch memory used: ' + str(total_mem))\n self.formatter.output('DM1 total: ' + str(dm1_total))\n self.formatter.output('DM2 total: ' + str(dm2_total))\n self.formatter.output('none total: ' + str(none_total))\n\n for current in range(0, num_entries):\n mem = dm1_size[current] + dm2_size[current] + none_size[current]\n self.formatter.output(\n 'For priority ' + str(current) + ' the memory allocated is ' +\n str(mem) + ' and the total no of users is ' +\n str(users[current])\n )\n self.formatter.output('DM1 ' + str(dm1_size[current]))\n self.formatter.output('DM2 ' + str(dm2_size[current]))\n self.formatter.output('none ' + str(none_size[current]))", "def process():", "def init_priority(self):\n arr = []\n priority_dict = dict()\n\n for p in self.processes:\n priority_dict[p.id] = int(p.period)\n\n for key, value in sorted(priority_dict.items(), key=lambda value: value[1]):\n arr.append(key)\n\n return arr", "def ppid(self):", "def processing(self) -> list:\r\n\r\n return self.__processing", "def collect_set(self, pidset):\r\n self.clear(pidset)\r\n self._process_lines(self._collect_set(pidset))", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def run(self):\n logger.info(\"Running...\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def StartLookingForEvents(self):\n for process_logger in self.process_loggers:\n process_logger.looking = True", "def processes(self):\n nodes = (self.nodes.exclude(process__isnull=True)\n .values_list('process_id', flat=True))\n return Process.objects.filter(id__in=nodes).distinct()", "def _collectCallback(self):\n log.debug(\"Scanning for processes from %s [%s]\",\n self._devId, self._manageIp)\n\n self.state = ZenProcessTask.STATE_SCANNING_PROCS\n\ttables = [NAMETABLE, PATHTABLE, ARGSTABLE]\n\tif AS400PLUG in self._device.zCollectorPlugins: tables = [AS400NAME]\n try:\n tableResult = yield self._getTables(tables)\n summary = 'Process table up for device %s' % self._devId\n self._clearSnmpError(\"%s - timeout cleared\" % summary, 'table_scan_timeout')\n if self.snmpConnInfo.zSnmpVer == 'v3':\n self._clearSnmpError(\"%s - v3 error cleared\" % summary, 'table_scan_v3_error')\n processes = 
self._parseProcessNames(tableResult)\n self._clearSnmpError(summary, 'resource_mib')\n self._deviceStats.update(self._device)\n processStatuses = self._determineProcessStatus(processes)\n self._sendProcessEvents(processStatuses)\n self._clearSnmpError(summary)\n yield self._fetchPerf()\n log.debug(\"Device %s [%s] scanned successfully\",\n self._devId, self._manageIp)\n except HostResourceMIBExecption as e:\n summary = 'Device %s does not publish HOST-RESOURCES-MIB' %\\\n self._devId\n resolution = \"Verify with snmpwalk %s %s\" %\\\n (self._devId, NAMETABLE )\n log.warn(summary)\n self._sendSnmpError(summary, \"resource_mib\", resolution=resolution)\n\n except error.TimeoutError as e:\n log.debug('Timeout fetching tables on device %s' % self._devId)\n self._sendSnmpError('%s; Timeout on device' % PROC_SCAN_ERROR % self._devId, 'table_scan_timeout')\n except Snmpv3Error as e:\n msg = \"Cannot connect to SNMP agent on {0._devId}: {1.value}\".format(self, str(e))\n log.debug(msg)\n self._sendSnmpError('%s; %s' % (PROC_SCAN_ERROR % self._devId, msg), 'table_scan_v3_error')\n except Exception as e:\n log.exception('Unexpected Error on device %s' % self._devId)\n msg = '%s; error: %s' % (PROC_SCAN_ERROR % self._devId, e)\n self._sendSnmpError(msg)", "def run(self):\n self.empty_pid_file()\n self.queue = Queue()\n self.monitor_process = Process(\n target=ResourceMonitor.monitor_function,\n args=(self.launcher, self.pid_file, self.frequency, self.queue)\n )\n self.monitor_process.start()", "def relevant_processes(self):\n return set(self._required_processes.keys())", "def _collect_counts(self):\n for t in self.system.keys():\n if t in self.gold:\n self.tp += 1\n else:\n self.fp += 1\n for t in self.gold.keys():\n if t not in self.system:\n self.fn += 1", "def collect_results(self) -> None:\n ready = multiprocessing.connection.wait(\n self.waitables.keys() - [self._direct_scheduler_conn], timeout=0\n )\n\n for sentinel in ready:\n if sentinel is self._direct_scheduler_conn:\n continue\n processor = cast(DagFileProcessorProcess, self.waitables[sentinel])\n self.waitables.pop(processor.waitable_handle)\n self._processors.pop(processor.file_path)\n self._collect_results_from_processor(processor)\n\n self.log.debug(\"%s/%s DAG parsing processes running\", len(self._processors), self._parallelism)\n\n self.log.debug(\"%s file paths queued for processing\", len(self._file_path_queue))", "def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]", "def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)", "def procs_running():\n \n return __proc_stat('procs_running')", "def _repopulate_pool(self):\n for i in range(self._processes - len(self._pool)):\n w = self.Process(target=worker,\n args=(self._inqueue, self._outqueue,\n self._initializer,\n self._initargs, self._maxtasksperchild,\n self._wrap_exception,\n self._finalizer,\n self._finalargs)\n )\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')", "def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n 
if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids", "def run(self):\n\t\t#Go through each number in the list\n\t\tfor number in self.numList:\n\t\t\t#record the count for this number\n\t\t\tself.permutationCountArray[self.count.value]=len(number)\n\t\t\t#increment total of numbers processed by this process\n\t\t\tself.count.value+=1", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def reapChildren():\n\n for col in allLivingCollectors():\n now = int(time.time())\n status = col.proc.poll()\n if status is None:\n # The process hasn't terminated yet\n continue\n col.proc = None\n\n # behavior based on status. a code 0 is normal termination, code 13\n # is used to indicate that we don't want to restart this collector.\n # any other status code is an error and is logged.\n if status == 13:\n LOG.info('removing %s from the list of collectors (by request)',\n col.name)\n col.dead = True\n elif status != 0:\n LOG.warning('collector %s terminated after %d seconds with '\n 'status code %d, marking dead', col.name,\n now - col.lastspawn, status)\n col.dead = True\n else:\n LOG.debug('Reap collector : %s', col.name)\n registerCollector(Collector(col.name, col.interval, col.filename,\n col.mtime, col.lastspawn))", "def process_details(self) -> List[ClaraProcessDetails]:\r\n return self._process_details", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def monitor(state: int):\n while True:\n if get_state() < state:\n logger.awaiting(\n \"awaiting reconciliation system state: %d with desired state: %d\" % (get_state(), state)\n )\n elif get_state() == state:\n logger.stable(\"number of processes: %d\" % (state))\n\n pids = \", \".join([str(obj.pid) for obj in active_children()])\n logger.info(\n \"current PIDs: %s\" % (pids)\n )\n time.sleep(1)", "def _process(proc_data: List[Dict]) -> List[Dict]:\n return proc_data", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()", "def run(self):\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()" ]
[ "0.70074445", "0.70008403", "0.65227914", "0.6477468", "0.64733964", "0.6447299", "0.64127636", "0.6285023", "0.6207518", "0.6167097", "0.6145146", "0.61055756", "0.6044723", "0.6031808", "0.5893498", "0.58866787", "0.58456194", "0.5834667", "0.5807094", "0.5745234", "0.5732411", "0.5732411", "0.5665456", "0.5664748", "0.5649203", "0.564741", "0.56443006", "0.56217676", "0.56139535", "0.5609689", "0.5597539", "0.5594532", "0.5516826", "0.55165935", "0.5499586", "0.54683745", "0.54534024", "0.5437455", "0.54221445", "0.5410198", "0.5403087", "0.54027176", "0.5392814", "0.53867817", "0.5381904", "0.5377982", "0.53596103", "0.5359276", "0.5352792", "0.5349328", "0.53474754", "0.5344456", "0.5338915", "0.53216094", "0.53141147", "0.53139263", "0.53132105", "0.53114337", "0.530784", "0.5299218", "0.5298699", "0.52935296", "0.5278999", "0.52484846", "0.524622", "0.52389085", "0.5237394", "0.51962125", "0.5196061", "0.5191783", "0.51883817", "0.51768285", "0.5175642", "0.5173018", "0.51605576", "0.51605016", "0.51519126", "0.5145833", "0.51456827", "0.51435435", "0.51343775", "0.51296395", "0.51254535", "0.51252145", "0.5125047", "0.51163656", "0.5115674", "0.50975555", "0.50874096", "0.50842786", "0.5070064", "0.5067408", "0.50668883", "0.50637674", "0.5059667", "0.5059633", "0.5049656", "0.50495327", "0.50477815", "0.50477815" ]
0.7125841
0
Returns true if the current process is still running.
def __is_running(pid): try: # signal flag 0 does not actually try to kill the process but does an error # check that is useful to see if a process is still running. os.kill(pid, 0) return True except OSError as e: # Errno #3 corresponds to the process not running. We could get # other errors like this process does not have permission to send # a signal to self.pid. But, if that error is returned to us, we # know the process is running at least, so we ignore the error. return e.errno != errno.ESRCH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_running(self):\r\n if self._gone:\r\n return False\r\n try:\r\n # Checking if pid is alive is not enough as the pid might\r\n # have been reused by another process.\r\n # pid + creation time, on the other hand, is supposed to\r\n # identify a process univocally.\r\n return self.create_time == \\\r\n self.get_process_create_time()\r\n except NoSuchProcess:\r\n self._gone = True\r\n return False", "def is_running(self):\n if self._process:\n return self._process.poll() is None\n else:\n return False", "def isRunning(self):\n if not self.running:\n return False\n elif self.process.poll() == 0 or self.process.returncode >= 0:\n return False\n else:\n return True", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False", "def running(self):\n return self.sub_process and self.sub_process.is_alive()", "def _is_alive(self) -> bool:\n\n if self._on:\n return True\n\n try:\n os.kill(self.proc.pid, 0)\n except (OSError, ProcessLookupError):\n return False\n\n return True", "def alive(self):\n return self._process.is_alive()", "def is_running(self) -> bool:\r\n return self.__running", "def is_running(self) -> bool:\n return self._running.is_set()", "def is_running(self) -> bool:\n return self.executor.is_alive() if self.executor else False", "def running(self):\n try:\n return self._thread.isAlive()\n except (AttributeError, RuntimeError, ThreadError):\n return False", "def _is_running(self):\n try:\n # Process is not killed, os.kill(pid, 0) does nothing but raise if process does not\n # exist.\n os.kill(self.pid, 0)\n except ProcessLookupError:\n return False\n else:\n return True", "def get_status(self) -> bool:\n try:\n self.__driver.service.assert_process_still_running()\n return True\n except AttributeError:\n return False", "def is_running(self):\n if self._thread and self._thread.is_alive:\n return True\n\n return False", "def running(self):\n return bool(self.proc and self._running())", "def is_running(self):\n\t\treturn self._running", "def is_running(self):\n qstat = self._grep_qstat('running')\n if qstat:\n return True\n return False", "def is_running(self):\n return self._running.is_set()", "def is_running(self) -> bool:\n return self._is_running", "def running(self):\n return not self._kill_event.is_set()", "def is_running(self):\n\t\treturn self in _running", "def is_running(self):\n return self.current_state == self.States.RUNNING", "def _is_running(self):\n return self._run_state.is_running()", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running", "def is_running(self):\n self.__condition.acquire()\n result = self.__is_running\n self.__condition.release()\n return result", "def is_alive(self) -> bool:\n return self._main_thread.is_alive()", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_running(self):\n return self.running", "def is_instance_running(self):\n try:\n self.instance.wait(timeout=1)\n except psutil.TimeoutExpired:\n pass\n return self.instance.is_running()", "def _is_alive(self, pid):\n process = next(x for x in self._processes if x.pid == pid)\n return process.is_alive()", "def is_process_running(self, name):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Checking to see if the process {} is \"\n \"running\".format(log_tag, name))\n return self.get_pids(name) is not None", "def running(self) -> bool:\n return 
self._running", "def is_running(self):\n return self._is_running", "def is_running(self):\n return self._is_running", "def is_alive(self) -> bool:\n if self._thread is None:\n return False\n return self._thread.is_alive()", "def is_running(self) -> bool:\n return False", "def _proc_is_alive(self):\n if self._proc is None:\n return False\n\n return self._proc.poll() is None", "def is_running(self):\n # type: () -> bool\n return self._run_state.is_running()", "def is_alive(self):\n result = execute('ps -Ao pgid', check_pg_alive=False, stdout=PIPE)\n pgids = result['stdout'].decode('utf8').split()\n return str(self.process.pid) in pgids", "def _isSubProcessRunning(self): \n # Check if child process has terminated. Set and return returncode attribute.\n if self.__process.poll() is None:\n return True\n else:\n return False", "def _is_running(self):\n # Public interface is given by get_status instead.\n self._update()\n return True if self.running_mode else False", "def proc_is_alive(pid):\n try:\n os.kill(pid, 0)\n except OSError as e:\n if e.errno == errno.EPERM:\n return True\n if e.errno == errno.ESRCH:\n return False\n raise # something else went wrong\n else:\n return True", "def IsRunning(self):\n return self.running", "def is_running(self):\n return self._task.running()", "def is_running(self):\n return self._task.running()", "def isRunning(self):\n if not self.hasBeenStarted():\n return False\n \n if not self._slave_dhcp_client_proc.poll(): # Poll our direct child (sudo)\n return False\n \n for pid in self._all_processes_pid:\n if not self._checkPid(pid):\n return False\n \n return True", "def is_running(self):\n if self.__process.poll() is not None: # process has ended\n for nbsr in (\"stdout\", \"stderr\"):\n getattr(self, nbsr).finalise()\n return False\n return True", "def is_running(self):\n\n return self._state == \"RUNNING\"", "def proc_is_alive(pid):\n handle = windll.kernel32.OpenProcess(\n win32con.SYNCHRONIZE | win32con.PROCESS_QUERY_INFORMATION, 0, pid)\n if handle == 0:\n return False\n\n # If the process exited recently, a pid may still exist for the handle.\n # So, check if we can get the exit code.\n exit_code = DWORD()\n rval = windll.kernel32.GetExitCodeProcess(handle, byref(exit_code))\n windll.kernel32.CloseHandle(handle)\n if rval == 0: # GetExitCodeProcess failure\n raise WinError()\n return exit_code.value == win32con.STILL_ACTIVE", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def is_running(self):\n return all(p.status == 'running' for p in self.values())", "def is_started(self):\n return bool(self._processes)", "def is_alive(self):\n\n return not self._stop.is_set()", "def is_alive(self) -> bool:\n return any(thread.is_alive() for thread in self.threads)", "def is_process_running(process_id):\n try:\n os.kill(process_id, 0)\n return True\n except OSError:\n return False", "def is_active(self):\n with self._lock:\n return self._termination_manager.is_active()", "def is_running(self):\n return self.action_thread and self.action_thread.is_alive()", "def is_alive(self, pid):\n return pid in self._pids", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def running(self):\n return self._lifetime_state in {\"starting\",\"running\",\"finishing\"}", "def is_alive(pid):\n pid = int(pid)\n return psutil.pid_exists(pid)", "def is_process_running(name):\n if not hasattr(is_process_running, \"proc\"):\n 
is_process_running.proc = None # it doesn't exist yet, so init it\n\n if is_process_running.proc:\n if is_process_running.proc.is_running():\n return True\n else:\n is_process_running.proc = None\n return False\n else:\n for p in psutil.process_iter():\n if p.name() == name:\n is_process_running.proc = p\n return True\n #\n return False", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def is_running(self) -> Awaitable[bool]:\n return self.instance.is_running()", "def running(self):\n return self._state == RUNNING_STATE", "def alive(self):\n return self._thread.is_alive()", "def running(self):\n\t\treturn self._start is not None", "def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd", "def isAlive(self):\n return self._state.isAlive()", "def running(self):\n if self.done() and self._is_running:\n self._is_running = False\n return self._is_running", "def is_proc_running(name):\n\n for p in psutil.process_iter(['name']):\n if p.info['name'] == name:\n return True\n\n return False", "def isAlive(self):\n return self.is_alive()", "def is_process_running(pid):\n return os.path.exists(\"/proc/%s\" % pid)", "def is_running(self):\n # do we have a job ID to work with?\n if self.jobid == None:\n return False\n else:\n q_status = self.queue.get_status(self.jobid)\n\n if q_status == self.queue.state[\"active\"]:\n self.meta[\"status\"] = 'PENDING'\n return True\n else:\n return False", "async def is_running(self, **kwargs: Any) -> bool:\n return self._enabled", "def is_running(self):\n status = self._call_player_proxy('GetStatus', None).unpack()[0]\n if status[3] == 1:\n return True\n return False", "def get_connected(self) -> bool:\n try:\n return self._background_process.is_alive()\n except AttributeError:\n return False", "def is_running(self):\n return self.type_id == STATE_RUNNING", "def is_running(self):\n return self._event_loop is not None and self._event_loop.is_running()", "def _ServerIsRunning( self ):\n return utils.ProcessIsRunning( self._gocode_handle )", "def _check_and_kill(self):\n if not self._pid:\n _log.warning('No PID; wptserve has not started.')\n return True\n\n # Polls the process in case it has died; otherwise, the process might be\n # defunct and check_running_pid can still succeed.\n if (self._process and self._process.poll()) or \\\n (not self._executive.check_running_pid(self._pid)):\n _log.debug('pid %d is not running', self._pid)\n return True\n\n _log.debug('pid %d is running, killing it', self._pid)\n self._executive.kill_process(self._pid)\n\n return False", "def is_alive(self):\n # TODO: This is not 100% correct, but that's the behavior our code relies ON.\n # Eventually we need to fix the implementation on StoppableThread so RunState defaults\n # \"_is_running\" to False and then set it to True inside \"run()\" and only check that value\n # here.\n if six.PY2:\n return super(StoppableThread, self).isAlive()\n\n if (\n not self._run_state.is_running()\n or not super(StoppableThread, self).is_alive()\n ):\n return False\n\n return True", "def check_pid_is_running(self):\n if not os.path.exists(self.__file):\n return True\n\n with open(self.__file, \"r\") as f:\n try:\n pid = int(f.read().strip())\n except Exception:\n return True\n\n try:\n os.kill(pid, 0)\n except OSError:\n return True\n\n return self.check_process_cmd_line(pid)", "def is_alive(self) -> bool:\n if self._loop_handler:\n return self._loop_handler.is_alive()\n else:\n return 
False", "def is_monitor_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "def is_running(program):\n return program in get_running()", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_alive(self):\n return self._is_alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def is_working(self):\n if not self.__th:\n return False\n return self.__th.is_alive()", "def status(self):\n # process running ?\n pid = self.get_pidfile()\n \n running = True\n \n # process is not running\n if pid is None:\n running = False\n else:\n if not self.send_signal(pid,0):\n running = False\n # abnormal state, delete the file\n self.delete_pidfile()\n \n if running:\n message = \"server is running\\n\"\n else:\n message = \"server is not running\\n\"\n sys.stdout.write(message)\n \n return running", "def _isrunning(self):\n return self.dp.state()==PyTango.DevState.RUNNING", "def running(self):\n with self._done_condition:\n return self._state == RUNNING", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()" ]
[ "0.8082826", "0.8080181", "0.8026835", "0.8013894", "0.8012157", "0.7952827", "0.78069186", "0.77576995", "0.7746866", "0.77379435", "0.7681154", "0.76794994", "0.7676883", "0.76404685", "0.7629628", "0.76252586", "0.75991505", "0.7584161", "0.75792927", "0.75562996", "0.75445235", "0.7544515", "0.7529347", "0.75221586", "0.7514568", "0.7514568", "0.75060815", "0.7483109", "0.7470721", "0.7470721", "0.7470721", "0.74611086", "0.74578655", "0.7408636", "0.7383971", "0.7382162", "0.7382162", "0.73478067", "0.73423094", "0.73390526", "0.7331595", "0.73276484", "0.7295713", "0.72735906", "0.72723615", "0.72706497", "0.7260417", "0.7260417", "0.7244883", "0.7225425", "0.72216487", "0.7218979", "0.72188365", "0.7211837", "0.7210926", "0.72080225", "0.72067857", "0.7188789", "0.71775395", "0.7126526", "0.7123141", "0.7121773", "0.7107302", "0.70849", "0.7079956", "0.7042862", "0.7032897", "0.70275354", "0.701647", "0.7011856", "0.7011206", "0.7001433", "0.6981356", "0.6980263", "0.69532907", "0.6951752", "0.69420815", "0.6941908", "0.69416666", "0.6932555", "0.69230723", "0.69180626", "0.69007593", "0.68920106", "0.6891695", "0.68892145", "0.6880873", "0.6865453", "0.68651336", "0.68643606", "0.6844998", "0.6844226", "0.6837354", "0.6837354", "0.68352795", "0.68203413", "0.6819126", "0.6814032", "0.6799884", "0.67984" ]
0.7129584
59
Returns a list of the process ids of processes that fulfill the match criteria. This will either use the commandline matcher or the target pid to find the process. If no process is matched, an empty list is returned.
def _select_processes(self): # check if at least one process is running is_running = False for pid in self.__pids: if ProcessMonitor.__is_running(pid): is_running = True break # at least one process is running if is_running: if not self.__aggregate_multiple_processes: return self.__pids # aggregate metrics, check the last discovered time if ( self.__last_discovered and time.time() * 1000 - self.__last_discovered < self.__process_discovery_interval * 1000 ): return self.__pids ps = ProcessList() if self.__commandline_matcher: self.__last_discovered = time.time() * 1000 if self.__include_child_processes: matched_processes = ps.get_matches_commandline_with_children( self.__commandline_matcher ) else: matched_processes = ps.get_matches_commandline( self.__commandline_matcher ) self.__pids = matched_processes if not self.__aggregate_multiple_processes and len(self.__pids) > 1: # old behaviour where multiple processes were not supported for aggregation self._logger.warning( "Multiple processes match the command '%s'. Returning existing pid. " "You can turn on the multi process aggregation support by adding the " "aggregate_multiple_processes configuration to true" % self.__commandline_matcher, limit_once_per_x_secs=300, limit_key="linux-process-monitor-existing-pid", ) self.__pids = [self.__pids[0]] else: # See if the specified target pid is running. If so, then return it. # Special cases: # '$$' mean this process. # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later. pids = [] if self.__target_pids: for t_pid in self.__target_pids: if t_pid == "$$": t_pid = int(os.getpid()) # skip this until it will be replaced with a real PID. elif t_pid == "$$TBD": continue else: t_pid = int(t_pid) pids.append(t_pid) self.__pids = pids return self.__pids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids", "def get_matches_commandline(self, match_pattern):\n\n matches = []\n for _process in self.processes:\n if re.search(match_pattern, _process[\"cmd\"]):\n matches.append(_process[\"pid\"])\n return matches", "def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")", "def get_matches_commandline_with_children(self, match_pattern):\n\n matched_pids = self.get_matches_commandline(match_pattern)\n for matched_pid in matched_pids:\n matched_pids.extend(self.get_child_processes(matched_pid))\n return list(set(matched_pids))", "def get_pids_filtered_by_regex(regex_list, excludes=None):\n excludes = excludes or []\n res = []\n for process in psutil.process_iter():\n try:\n cmdline = process.cmdline()\n except psutil.NoSuchProcess:\n cmdline = None\n except psutil.AccessDenied:\n cmdline = None\n if cmdline:\n name = \" \".join(cmdline)\n for r in regex_list:\n if name.strip() != \"\" and re.match(r, name):\n res.append(process.pid)\n return res", "def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids", "def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]", "def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return 
procs", "def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs", "def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []", "def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]", "def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. 
Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids", "def get_filtered_pids(filterstr, excludes=None):\n excludes = excludes or []\n cmd = \"ps ax | grep '%s'\" % filterstr\n rc, out, err = j.core.executors.run_local(cmd)\n # print out\n found = []\n\n def checkexclude(c, excludes):\n for item in excludes:\n c = c.lower()\n if c.find(item.lower()) != -1:\n return True\n return False\n\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != -1:\n line = line.strip()\n if not checkexclude(line, excludes):\n # print \"found pidline:%s\"%line\n found.append(int(line.split(\" \")[0]))\n return found", "def pids(self):\n return self._pidToProcess.iterkeys()", "def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids", "def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids", "def find(name, exact=False):\n processes = run(\"ps aux | grep {0}\".format(name))\n res = []\n for line in processes.split(\"\\n\"):\n if not line.strip():\n continue\n line = RE_SPACES.split(line, 10)\n # We skip lines that are not like we expect them (sometimes error\n # message creep up the output)\n if len(line) < 11:\n continue\n user, pid, cpu, mem, vsz, rss, tty, stat, start, time, command = line\n if (exact and command == name) \\\n or (not exact and command.startswith(name)):\n res.append(pid)\n return res", "def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]", "def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess", "def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]", "def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children", "def get_similar_processes():\n myprocess = get_my_process()\n result = []\n for item in psutil.process_iter():\n try:\n if item.cmdline() == myprocess.cmdline():\n result.append(item)\n except psutil.NoSuchProcess:\n pass\n return result", "def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()", "def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor 
proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None", "def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)", "def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids", "def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst", "def matching(self, pids):\n for pid in pids:\n if self.matches(pid):\n yield pid", "def get_running_processes(self):\n\n all_processes = []\n for _process in self.processes:\n all_processes.append(_process[\"pid\"])\n return all_processes", "def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active", "def _find_running_exe(exe):\n candidates = []\n exe = path.abspath(exe)\n for proc in _get_process_list():\n try:\n pinfo = proc.as_dict(attrs=['pid', 'exe'])\n except psutil.NoSuchProcess:\n pass\n else:\n if pinfo[\"exe\"] and pinfo['exe'] == exe:\n candidates.append(pinfo['pid'])\n return candidates", "def findProcessIdByName(processName):\n listOfProcessObjects = []\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"create_time\"])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo[\"name\"].lower():\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return listOfProcessObjects", "def pids(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/pids\" % (\n self.sessionid, self.name))\n result = self.server.json_body(resp)\n return result['pids']", "def get_current_server_pidfiles_and_ports():\r\n pidfile_dir = ReportingServerManager._get_pidfile_dir()\r\n # There should only be one 
pidfile, but there may be errors/race conditions where\r\n # there are multiple of them.\r\n pidfile_names = os.listdir(pidfile_dir) if os.path.exists(pidfile_dir) else []\r\n ret = []\r\n for pidfile_name in pidfile_names:\r\n m = re.match(r'port_(\\d+)\\.pid', pidfile_name)\r\n if m is not None:\r\n ret.append((os.path.join(pidfile_dir, pidfile_name), int(m.group(1))))\r\n return ret", "def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}", "def fetch_process_queries(self):\n url = \"/api/investigate/v1/orgs/{}/processes/search_jobs\".format(\n self.credentials.org_key\n )\n ids = self.get_object(url)\n return ids.get(\"query_ids\", [])", "def GetWith(cls, expression, compare=(lambda a, b: fnmatch.fnmatch(a, b))):\n\t\tres = []\n\t\texpression = \"*\" + expression + \"*\"\n\t\tfor pid, cmdline in cls.List().items():\n\t\t\tif compare(cmdline, expression):\n\t\t\t\tres.append(pid)\n\t\treturn res", "def getChildPIDs(self):\n\t\treturn self.pids", "def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))", "def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result", "def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]", "def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')", "def get_processes():\n cmd = 'ps -do pid:1,cmd' # linux command to run\n processes = {}\n\n with os.popen(cmd) as out:\n # see https://stackoverflow.com/questions/24362007/\n next(out.__iter__()) # skip header (first line)\n\n for line in out:\n # sepate pid and command in a tuple\n p = line.rstrip('\\n').split(' ', 2)\n\n # skip kernel threads\n if p[1][0] == '[':\n continue\n\n processes[p[0]] = p[1]\n\n return processes", "def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]", "def get_pid_list(disallowed_prefixes, allowed_prefixes):\n # exceptions\n but = disallowed_prefixes if disallowed_prefixes is not None else []\n if allowed_prefixes is None:\n # if nothing setted - all ps will be returned except setted\n result = [pid\n for pid in os.listdir('/proc')\n if pid.isdigit() and pid not in but]\n else:\n result = []\n for pid in os.listdir('/proc'):\n if pid.isdigit() and pid not in but:\n name = get_pid_name(pid)\n if pid in allowed_prefixes or \\\n any(name.startswith(val) for val in allowed_prefixes):\n print name\n # this is allowed pid?\n result.append(pid)\n return result", "def _search_multiprocessing(self):\n pool = multiprocessing.Pool(self._main_args_.n_jobs)\n _cand_list = pool.map(self._search, self._main_args_._n_process_range)\n\n return _cand_list", "def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res", "def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in 
os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes", "def pidof(processname = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname + \".pid\")\n if processname is not None and os.path.exists(pidpath):\n f = open (pidpath)\n pids = f.readlines()\n f.close()\n return pids\n else:\n return False", "def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END", "def get_process_list(config):\n # get list of processes\n process_list = getlist(config.getstr('config', 'PROCESS_LIST'))\n\n out_process_list = []\n # for each item remove dashes, underscores, and cast to lower-case\n for process in process_list:\n # if instance is specified, extract the text inside parenthesis\n match = re.match(r'(.*)\\((.*)\\)', process)\n if match:\n instance = match.group(2)\n process_name = match.group(1)\n else:\n instance = None\n process_name = process\n\n wrapper_name = get_wrapper_name(process_name)\n if wrapper_name is None:\n config.logger.warning(f\"PROCESS_LIST item {process_name} \"\n \"may be invalid.\")\n wrapper_name = process_name\n\n # if MakePlots is in process list, remove it because\n # it will be called directly from StatAnalysis\n if wrapper_name == 'MakePlots':\n continue\n\n out_process_list.append((wrapper_name, instance))\n\n return out_process_list", "def get_pid_by_port(port):\n\n process = get_process_by_port(port)\n if process is None:\n return []\n return process.pid", "def get_pid_of_all_workers(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and (\"slave\" in i.name or \"master\" in i.name):\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes", "def get_child_processes(self, ppid):\n\n all_children = []\n children_to_explore = set()\n for _pid in self.parent_to_children_map[ppid]:\n all_children.append(_pid)\n children_to_explore.add(_pid)\n\n # get the children 'recursively'\n while children_to_explore: # the invariant\n child_to_explore = children_to_explore.pop()\n if not self.parent_to_children_map.get(child_to_explore):\n continue\n unvisited = self.parent_to_children_map[child_to_explore]\n for node in unvisited:\n if node not in all_children:\n children_to_explore.add(node)\n all_children.append(node)\n return list(set(all_children))", "def get_pids_filtered_sorted(filterstr, sortkey=None):\n if sortkey is not 
None:\n cmd = \"ps aux --sort={sortkey} | grep '{filterstr}'\".format(filterstr=filterstr, sortkey=sortkey)\n else:\n cmd = \"ps ax | grep '{filterstr}'\".format(filterstr=filterstr)\n rc, out, err = execute(cmd)\n # print out\n found = []\n for line in out.split(\"\\n\"):\n if line.find(\"grep\") != -1 or line.strip() == \"\":\n continue\n if line.strip() != \"\":\n if line.find(filterstr) != -1:\n line = line.strip()\n if sortkey is not None:\n found.append(int([x for x in line.split(\" \") if x][1]))\n else:\n found.append(int(line.split(\" \")[0]))\n return found", "def detect_instance_pids(self):\n for instance in self.all_instances:\n instance.detect_pid(\n ppid=self.instance.pid,\n full_binary_path=self.cfg.real_sbin_dir,\n offset=0,\n )\n\n self.show_all_instances()\n self.detect_arangosh_instances(self.cfg, self.cfg.version)", "def _psa(cmd, allmatching=True, paths=None):\n import psutil\n pids = list()\n cmdlines = list()\n procs = list()\n cmdline = ''\n bins = _whicha(cmd, paths)\n if not allmatching:\n bins = bins[:1]\n for pid in psutil.pids():\n try:\n proc = psutil.Process(pid)\n cmdline = proc.cmdline()\n if any([bin in cmdline for bin in bins]):\n cmdlines.append(cmdline)\n pids.append(pid)\n procs.append(proc)\n except psutil.ZombieProcess:\n pass\n except psutil.AccessDenied:\n pass\n return (pids, cmdlines, procs)", "def processes(self):\n nodes = (self.nodes.exclude(process__isnull=True)\n .values_list('process_id', flat=True))\n return Process.objects.filter(id__in=nodes).distinct()", "def monitoredProcs(self):\n return self._pidToProcess.itervalues()", "def get_running_pris(self):\n try:\n running_pris_list = []\n output = self.ssh.exec_command(self.check_running_kombu_dialer_command)\n for line in output[1].readlines():\n line = line.split()\n if self.server in line and \"-g\" in line:\n running_pris_list.append(\n int(\n line[line.index(\"-g\")+1][2:]\n )\n )\n return running_pris_list\n except Exception as err:\n self.error_logger.error(err.message + \" PRITester::get_running_pris\")\n return None", "def ProcessIterator(pids, process_regex_string, cmdline_regex_string,\n ignore_grr_process, error_list):\n pids = set(pids)\n if ignore_grr_process:\n grr_pid = psutil.Process().pid\n else:\n grr_pid = -1\n\n if process_regex_string:\n process_regex = re.compile(process_regex_string)\n else:\n process_regex = None\n\n if cmdline_regex_string:\n cmdline_regex = re.compile(cmdline_regex_string)\n else:\n cmdline_regex = None\n\n if pids:\n process_iterator = []\n for pid in pids:\n try:\n process_iterator.append(psutil.Process(pid=pid))\n except Exception as e: # pylint: disable=broad-except\n error_list.Append(\n rdf_memory.ProcessMemoryError(\n process=rdf_client.Process(pid=pid), error=str(e)))\n else:\n process_iterator = psutil.process_iter()\n\n for p in process_iterator:\n if process_regex and not process_regex.search(p.name()):\n continue\n\n if cmdline_regex and not cmdline_regex.search(\" \".join(p.cmdline())):\n continue\n\n if p.pid == grr_pid:\n continue\n\n yield p", "def get_pids(extdir):\n\n pid_fnames = glob.glob(extdir + \"/*.pid\")\n\n pids = {}\n for fname in pid_fnames:\n try:\n # Get the pid\n with open(fname, \"r\") as fobj:\n pid = fobj.read().strip()\n pid = int(pid)\n\n # Check if process running\n os.kill(pid, 0)\n except (OSError, IOError, ValueError):\n continue\n\n service = os.path.basename(fname)\n service = service.split(\".\")[0]\n pids[service] = pid\n\n return pids", "def get_ceph_pids():\n pids = []\n for srv in get_srv_list():\n 
cfg = get_srv_config(srv)\n with open(cfg['pid_file'], 'r') as file_fd:\n pids.append((srv, int(file_fd.read())))\n return pids", "def list_java_processes():\n for line in shell_command_output('jps -l').splitlines():\n line = line.strip()\n if len(line) == 0:\n continue\n (pid, class_name) = line.split()\n yield (int(pid), class_name)", "def processor_ids(self):\n return self._processor_ids", "def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted", "def get_vid_pid_list(self):\n\n return self.vid_pid_s", "def get_processes():\n yield from psutil.process_iter()", "def get_match_pipe_ids(urls, arg):\n match_pipe_id_list = []\n for url in urls:\n pipelines = get_pipe_ids(url + \"pipelines\", arg).json()\n for pipeline in pipelines:\n if pipeline['status'] in STATUS_LIST:\n match_pipe_id_list.append(pipeline['id'])\n return match_pipe_id_list", "def pids(self):\r\n return copy(self._pids)", "def get_pid_of_all_slaves(containers):\n res = []\n for i in containers:\n if \"mongo\" not in i.name and \"slave\" in i.name:\n print(i.name, file=sys.stdout)\n pid = i.attrs[\"State\"][\"Pid\"]\n res.append(pid)\n return res", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out", "def kill_process_by_name(re_pattern):\n\n user_name = os.getlogin()\n parent_pid = os.getppid()\n current_pid = os.getpid()\n\n stdin = subprocess.check_output([\"ps\", \"-u\", user_name])\n\n processes = []\n\n processes = [(int(re.match(\" *[0-9]+\", line).group()), line.split(' ')[-1]) for line in stdin.split('\\n')[1:-1]]\n\n for process in processes:\n\n if re.match(re_pattern, process[1]) and process[0] != current_pid:\n# print \"KILLING PID: \", process\n os.kill(process[0], signal.SIGKILL)", "def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs", "def filter_process(self, *args, **kwargs):\n if 'uuid' in kwargs:\n kwargs['uuid'] = 
Process.strip_uuid(kwargs['uuid'])\n\n kwargs = {'process__{}'.format(k): v for k, v in kwargs.items()}\n\n trees = (ProcessNode.objects.filter(*args, **kwargs)\n .order_by('tree_id')\n .values_list('tree_id', flat=True)\n .distinct())\n return self.filter(process_tree__tree_id__in=trees)", "def relevant_processes(self):\n return set(self._required_processes.keys())", "def getProcs(**options):\n procSeq = search.ProcSearch.byOptions(**options).procs\n return [Proc(p) for p in procSeq.procs]", "def processes(self):\n return self._getint('processes')", "def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))", "def identify_processes(self) -> Dict[int, dict]:\n\n processes = {}\n\n for process in self.behavior[\"generic\"]:\n\n proc_name, proc_path = split_path(process[\"process_path\"])\n\n processes[int(process[\"pid\"])] = {\n FieldNames.PROCESS_IMAGE: proc_name,\n FieldNames.PROCESS_IMAGE_PATH: proc_path,\n FieldNames.PROCESS_ID: int(process[\"pid\"]),\n }\n\n return processes", "def get_open_fds(self):\n #By shaunc - http://stackoverflow.com/questions/2023608/check-what-files-are-open-in-python \n import subprocess\n import os\n \n pid = os.getpid()\n procs = subprocess.check_output( \n [ \"lsof\", '-w', '-Ff', \"-p\", str( pid ) ] )\n \n fprocs = filter(\n lambda s: s and s[ 0 ] == 'f' and s[1: ].isdigit(),\n procs.split( '\\n' ) \n )\n \n return fprocs", "def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None", "def job_ids(config):\n errcode, output = queue(config)\n parse_line = False\n current_sched = None\n ids = []\n if errcode != 0:\n logger.debug('queue command issued return code: %s', errcode)\n return ids\n\n for line in output.splitlines():\n line = line.strip()\n parse_line = parse_line and bool(line)\n if parse_line:\n assert current_sched\n ids.append( (current_sched, line.split()[0]) )\n continue\n\n if line.startswith('--'):\n current_sched = line.split()[2].strip()\n\n if line.startswith('ID'):\n parse_line = True\n\n logger.debug('found the following jobs in Condor queue: %s', ids)\n return ids", "def get_processes(self):\n processes={}\n for (server_ip, server_port) in self.hosts:\n try:\n server = xmlrpclib.ServerProxy(\"http://%s:%d\"%(server_ip, server_port))\n uid = server.get_id()\n if uid != self.uid:\n processes[uid] = server\n except socket.error:\n pass\n return processes", "def psa(line):\n from stefco.get_terminal_size import get_terminal_size\n import textwrap\n cmd, paths = _cmd_path_lex(line)\n pids, cmds, procs = _psa(cmd, allmatching=True, paths=paths)\n print(\"Matching processes:\\nPID\\tCOMMAND\\n\" + 80*\"~\" + \"\\n\\n\")\n procdict = dict()\n termwidth = get_terminal_size().columns\n for i, pid in enumerate(pids):\n procdict[pid] = procs[i]\n wrappedcmd = textwrap.wrap(str(cmds[i]), width=(termwidth - 8))\n # print pid on first line of command\n print(\"{}\\t{}\".format(pid, wrappedcmd.pop(0)))\n # print any remaining lines of the command\n if not len(wrappedcmd) == 0:\n print(\"\\t\" + \"\\n\\t\".join(wrappedcmd))\n # print an extra blank line after each process\n print(\"\")\n return 
procdict", "def getMatchIds(self):\n return sorted(self._matches.keys())", "def pfind(pid):\n for p in list_foreach(\"allproc\", \"p_list\"):\n if p['p_pid'].cast(gdb.lookup_type(\"int\")) == pid:\n return p\n raise gdb.error(\"No process with pid {} exists\".format(pid))", "def waiting_procs(self):\n return [p.model_id for p in self.primary_scheduler.queue_nodes.wait_q]", "def _determineProcessStatus(self, procs):\n beforePids = set(self._deviceStats.pids)\n afterPidToProcessStats = {}\n pStatsWArgsAndSums, pStatsWoArgs = self._splitPStatMatchers()\n for pid, (name, psargs) in procs:\n pStats = self._deviceStats._pidToProcess.get(pid)\n if pStats:\n # We saw the process before, so there's a good\n # chance that it's the same.\n if pStats.match(name, psargs):\n # Yep, it's the same process\n log.debug(\"Found process %d on %s, matching %s %s with MD5\",\n pid, pStats._config.name, name, psargs)\n log.debug(\"%s found existing stat %s %s for pid %s - using MD5\", self._devId, pStats._config.name,\n pStats._config.originalName, pid)\n afterPidToProcessStats[pid] = pStats\n continue\n\n elif pStats.match(name, psargs, useMd5Digest=False):\n # In this case, our raw SNMP data from the\n # remote agent got futzed\n # It's the same process. Yay!\n log.debug(\"%s - Found process %d on %s, matching %s %s without MD5\",\n self._devId, pid, pStats._config.name, name, psargs)\n afterPidToProcessStats[pid] = pStats\n continue\n\n # Search for the first match in our list of regexes\n # that have arguments AND an MD5-sum argument matching.\n # Explicitly *IGNORE* any matchers not modeled by zenmodeler\n for pStats in pStatsWArgsAndSums:\n if pStats.match(name, psargs):\n log.debug(\"%s Found process %d on %s %s\",\n self._devId, pid, pStats._config.originalName, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n else:\n # Now look for the first match in our list of regexes\n # that don't have arguments.\n for pStats in pStatsWoArgs:\n if pStats.match(name, psargs, useMd5Digest=False):\n log.debug(\"Found process %d on %s\",\n pid, pStats._config.name)\n afterPidToProcessStats[pid] = pStats\n break\n\n afterPids = set(afterPidToProcessStats)\n afterByConfig = reverseDict(afterPidToProcessStats)\n newPids = afterPids - beforePids\n deadPids = beforePids - afterPids\n\n restarted = {}\n for pid in deadPids:\n procStats = self._deviceStats._pidToProcess[pid]\n procStats.discardPid(pid)\n if procStats in afterByConfig:\n ZenProcessTask.RESTARTED += 1\n pConfig = procStats._config\n if pConfig.restart:\n restarted[procStats] = pConfig\n\n # Now that we've found all of the stragglers, check to see\n # what really is missing or not.\n missing = []\n for procStat in self._deviceStats.processStats:\n if procStat not in afterByConfig:\n missing.append(procStat._config)\n\n # For historical reasons, return the beforeByConfig\n beforeByConfig = reverseDict(self._deviceStats._pidToProcess)\n\n return (afterByConfig, afterPidToProcessStats,\n beforeByConfig, newPids, restarted, deadPids,\n missing)", "def get_pid(name, path=None):\n if name not in list_(limit=\"running\", path=path):\n raise CommandExecutionError(\n f\"Container {name} is not running, can't determine PID\"\n )\n info = __salt__[\"cmd.run\"](f\"lxc-info -n {name}\").split(\"\\n\")\n pid = [\n line.split(\":\")[1].strip()\n for line in info\n if re.match(r\"\\s*PID\", line) is not None\n ][0]\n return pid", "def get_jobs_by_process_id(self, process_id):\n\n jobs = list()\n for job in Job.objects.filter(process=process_id):\n 
jobs.append(job)\n return jobs", "def required_processes(self):\n return {name for name, flag in self._required_processes.items() if flag}", "def list_local_processes(self, process_type=''):\n if not process_type:\n return self.procs.values()\n\n return [p for p in self.procs.itervalues() if p.process_type == process_type]" ]
[ "0.7879332", "0.7805906", "0.7679771", "0.7294594", "0.70915145", "0.67127335", "0.6696709", "0.6683365", "0.6582418", "0.65806013", "0.64939845", "0.6404458", "0.63337654", "0.6324033", "0.6305915", "0.63057446", "0.6257686", "0.62433344", "0.6234016", "0.612377", "0.60830504", "0.6067506", "0.6035695", "0.60167396", "0.60003424", "0.5983646", "0.5939461", "0.59372294", "0.5909669", "0.59022146", "0.5901273", "0.58477527", "0.58059376", "0.5796053", "0.57902485", "0.5785101", "0.5769041", "0.57316786", "0.5698944", "0.5695813", "0.56757504", "0.5652571", "0.5652008", "0.5624965", "0.55662084", "0.5556187", "0.5538189", "0.5532997", "0.55294406", "0.55254215", "0.55222374", "0.5496858", "0.5482606", "0.54821336", "0.5481818", "0.5480126", "0.5470483", "0.5451948", "0.5434166", "0.54338086", "0.54192126", "0.54160815", "0.5379784", "0.5373439", "0.5366919", "0.5338634", "0.5316281", "0.5312912", "0.52864677", "0.5254893", "0.5245908", "0.523767", "0.52167094", "0.51952547", "0.518211", "0.51634496", "0.5162677", "0.51572984", "0.51299536", "0.510694", "0.50937057", "0.5093049", "0.50486046", "0.50442874", "0.5030152", "0.50048286", "0.49720463", "0.49713427", "0.4966555", "0.495602", "0.4953548", "0.49496907", "0.49312282", "0.49290976", "0.49290296", "0.4926062", "0.4919526", "0.4913593", "0.49114102", "0.49012968" ]
0.72393227
4
Set the PID of the process that was marked as $$TBD.
def set_pid(self, pid): # type: (int) -> None for i in range(len(self.__target_pids)): if self.__target_pids[i] == "$$TBD": self.__target_pids[i] = pid break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def def_pid(self,pid):\n self.pid=int(pid)", "def pid(self, pid):\n\n self._pid = pid", "def pid(self, pid):\n\n self._pid = pid", "def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)", "def set_hold():\n hold = request.params.get(\"hold\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetHold(pid, hold)\n if retval != 0:\n LOG.error(\"Failed to set PID internal state holding. Error code: %s\", ERROR_CODES[retval])", "def pid():\n return 0x0204", "def pid():\n return 0x0204", "def process_id(self, process_id):\n\n self._process_id = process_id", "def process_id(self, process_id):\n\n self._process_id = process_id", "def pid(self):", "def dynamic_pid(self):\n pass", "def test_missingPIDVariable(self):\n fakeEnvironment = self.initializeEnvironment(3, os.getpid())\n del fakeEnvironment['LISTEN_PID']\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())", "def ppid(self):", "def cli_set_process_title():\n raise NotImplementedError()", "def setParentID(self, pid='0'):\n self.PUID = pid\n logger.debug('parentID set to: %s' % self.PID)", "def setInitialProcessUID(self, puid):\n\n self.p_uid = puid\n return", "def set_tid(self, tid):\n self.__tid = tid", "def try_set_process_name(self, name=None):\n if name is None:\n name = getattr(self, 'process_name', None)\n if name is None:\n return\n try:\n import setproctitle\n setproctitle.setproctitle(name)\n except (ImportError, AttributeError):\n pass", "async def setprob(self, ctx, problem_name=None):\n if problem_name:\n if not await problem_exists(ctx, problem_name):\n return\n current_problem[ctx.author.id] = problem_name\n if problem_name:\n await ctx.send('Problem successfully set.')\n else:\n await ctx.send('The bot will no longer check your submissions.')", "def free_pid():\n host, pid, tid = get_process_id()\n while True:\n # PIDs are often restricted to a small range. 
On Linux the range >32k is by default not used.\n pid = random.randint(33000, 65000)\n if not process_alive(host, pid, tid):\n return pid", "async def set_post_number(self, ctx: commands.Context, post_num: int = 0):\n await ctx.cfg_channel.current_post_num.set(post_num)\n await ctx.send(\"Current auto-post number has been set to {}\".format(post_num))\n await ctx.cfg_channel.last_post_time.set(0)", "def pid(self):\n\t\treturn self.__pid", "def __init__(self, pid):\n self.pid = pid\n self.refresh_code_ranges()", "def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False", "def set_mintty_title(title):\n\n is_mintty = False\n see('sys.platform')\n if 'win' in sys.platform.lower():\n try:\n p = subprocess.Popen(['ps'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except Exception as e:\n parser.error('Error executing `ps`: {e!s}'.format(**locals()))\n (stdout, stderr) = p.communicate()\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n rc = p.wait()\n log.info('ps: {rc}, {stdout!r}, {stderr!r}'.format(**locals()))\n lines = stdout.splitlines()\n see('lines')\n if lines:\n headings = lines[0].split()\n see('headings')\n processes = {}\n for line in lines[1:]:\n tokens = line.split()\n see('tokens')\n pid = tokens[0]\n if len(tokens) <= len(headings):\n see('pid')\n processes[pid] = {}\n for (pos, heading) in enumerate(headings):\n processes[pid][heading] = tokens[pos]\n see('processes')\n process = processes.get(str(os.getpid()))\n while True:\n see('process')\n if process:\n if process['COMMAND'] == '/usr/bin/mintty':\n is_mintty = True\n break\n else:\n process = processes.get(process['PPID'])\n else:\n break\n see('is_mintty')\n\n if is_mintty:\n log.info('changing mintty title to: {title!r}'.format(**locals()))\n sys.stdout.write('\\x1b]0;{title}\\x07'.format(**locals()))", "def pb_id(self, pb_id: str):\n # FIXME(BMo) instead of creating the object to check if the PB exists\n # use a method on PB List?\n # ProcessingBlock(pb_id)\n self.set_state(DevState.ON)\n self._pb_id = pb_id", "def set_pno(self, pno):\n self.__pno = pno", "def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add", "def test_003_pid(self):\n HEADING()\n pid = self.db.pid()\n print (pid)\n assert True", "def pid(self):\n return self.__pid", "def setPIDSourceType(self, pidSource: PIDSourceType) -> None:\n self.pidSource = ...", "def set_pid(self, kp=None, ki=None, kd=None):\n if kp is not None:\n self.k_p = kp\n if ki is not None:\n self.k_i = ki\n if kd is not None:\n self.k_d = kd\n\n self.reset_sum()", "def dunning_process_no(self, dunning_process_no):\n\n self._dunning_process_no = dunning_process_no", "def _kill_process(self, box_config):\n try:\n self.logger.info(f'kill: {box_config.process_name} {{')\n self.logger.info(f'target process pid={box_config.pid}')\n if box_config.pid and psutil.pid_exists(box_config.pid):\n p = psutil.Process(box_config.pid)\n p.kill()\n p.wait()\n box_config.pid = None\n self.bc_dao.update(box_config)\n remove_pid_file(box_config.process_name)\n except Exception:\n 
self.logger.error(f'Exception on killing: {box_config.process_name}', exc_info=True)\n finally:\n self.logger.info('}')", "def test_nonIntegerPIDVariable(self):\n fakeEnvironment = self.initializeEnvironment(3, \"hello, world\")\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def pid(self):\n return self._pid", "def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None", "def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids", "def set_pid(self, pid, value):\n if type(value) in (list, tuple):\n value = \",\".join(map(hex, value))\n cmd = \"ATSET {}={}\\r\".format(pid, value)\n self.sendCMD(cmd)", "def pid(self):\n return self._process.pid", "def set_kd():\n kd = request.params.get(\"kd\", 0, type=int)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetKd(pid, ctypes.c_int(kd))\n if retval != 0:\n LOG.error(\"Failed to set PID Kd. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"Kd: %f\", kd)\n LOG.info(\"PID: %d\", pid)", "def process_kick_off(setting, script_dir, stage):\n print(\"Info: run started for %s.\"%script_dir)\n run_dir = setting['run_dir']\n _id = setting['_id']\n process = subprocess.Popen(\"/bin/bash %s\"%(script_dir), shell=True)\n process_pid = process.pid\n db = db_connector()\n db.run.update_one({'_id': _id},\n {\"$set\": {\"pid\": process_pid}})", "def pid(self):\n return self._get_process_id()", "def getmypid():\n raise NotImplementedError()", "def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()", "def epid(self, epid):\n\n self._epid = epid", "def epid(self, epid):\n\n self._epid = epid", "def setPidx(self, pidx):\n self.keeper.setGbl(b\"pidx\", b\"%x\" % pidx)", "def set_name(name):\n\n\tassert ltrace(TRACE_PROCESS, u'| set_name({0})', (ST_NAME, name))\n\n\ttry:\n\t\timport ctypes\n\t\tctypes.cdll.LoadLibrary('libc.so.6').prctl(15, name + '\\0', 0, 0, 0)\n\n\texcept Exception, e:\n\t\tlogging.warning(_(u'Cannot set process name (was %s).') % e)", "def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid", "def get_PID(self):\n return self.PID", "def pid(self):\n # type: () -> Optional[int]\n try:\n return self._process.pid # type: ignore # pylint: disable=no-member\n except:\n return None", "def set_goal(self, goal):\n self._pid_lock.acquire() # Acquire Lock\n self._goal = goal\n self._pid_lock.release() # Release Lock", "def set_pid(self, kp, ki, kd):\n for vehicle_id in self.vehicle_ids:\n self.frenets[vehicle_id].set_pid(kp, ki, kd)", "def _UpdateProcessingStatus(self, pid, process_status, used_memory):", "def put_pid(html):\n pid = 1\n while \"<p>\" in html:\n pttn = \"<p id=\\\"p\"+str(pid)+\"\\\">\"\n html = html.replace(\"<p>\", pttn, 1)\n pid += 1\n return html", "def setup_process_stats(pid):\n return psutil.Process(pid)", "def _set_processes(self, processes: int = 1):\n self.__processes = processes", "def set_kp():\n kp = request.params.get(\"kp\", 0, type=float)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetKp(pid, ctypes.c_float(kp))\n if retval 
!= 0:\n LOG.error(\"Failed to set PID Kp. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"Kp: %f\", kp)\n LOG.info(\"PID: %d\", pid)", "def set_default_pids(config, gState):\n axes = dict(RADEC = \"raDec\", ROT = \"rot\", FOCUS = \"focus\", SCALE = \"scale\")\n for axis in config.options('PID'):\n axis = axes[axis.upper()]\n Kp, Ti_min, Ti_max, Td, Imax, nfilt = [float(v) for v in config.get('PID', axis).split()] \n gState.set_pid_defaults(axis, Kp=Kp, Ti_min=Ti_min, Ti_max=Ti_max, Td=Td, Imax=Imax, nfilt=int(nfilt))\n gState.pid[axis].setPID(Kp=Kp, Ti=Ti_min, Td=Td, Imax=Imax, nfilt=nfilt)", "def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None", "def setUp(self) -> None:\n self.pid = create_progress(total=1337, current=42)", "def setUp(self) -> None:\n self.pid = create_progress(total=1337, current=42)", "async def setprobdetails(self, ctx, problem_name, *, arg):\n if not await problem_exists(ctx, problem_name):\n return\n problems[problem_name].details = arg\n await ctx.send('Problem details set.')\n await write_problems()", "def checkPID(pid):\n\tif pid == 0:\t#If PID newly created return False\n\t\treturn False\n\ttry:\n\t\tos.kill(pid, 0)\n\texcept OSError:\n\t\treturn False\n\telse:\n\t\treturn True", "def kill_inference_process(VD):\n \n if VD['inference_process'] == None:\n VD['terminal_output'] += \"There is no active inference process.\\n\"\n else:\n VD['inference_process'] = kill_process(VD['inference_process'])\n VD['terminal_output'] += \"\\nKilled inference process.\\n\"", "def kill_process(self,PID):\n os.system(\"sudo kill {}\".format(PID))\n return True", "def setPTBR(self, PTBR_addr):\n self.PTBR = PTBR_addr", "def pid(self):\n if self.proc is None:\n return 0\n return self._pid()", "def set_setpoint():\n setpoint = request.params.get(\"setpoint\", 0, type=float)\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetSetpoint(pid, ctypes.c_float(setpoint))\n if retval != 0:\n LOG.error(\"Failed to set PID setpoint. Error code: %s\", ERROR_CODES[retval])\n LOG.info(\"setpoint: %f\", setpoint)\n LOG.info(\"PID: %d\", pid)", "def set_replied_to(thread):\r\n conn = psycopg2.connect(DATABASE_URL)\r\n cur = conn.cursor()\r\n cur.execute(\"\"\"\r\n UPDATE threads\r\n SET replied_to=true\r\n WHERE id=%s\r\n \"\"\",\r\n (thread['id'],))\r\n \r\n conn.commit()\r\n cur.close()\r\n conn.close()", "def set_relock_enabled():\n relock_enabled = request.params.get(\"relock_enabled\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetRelock(pid, relock_enabled)\n if retval != 0:\n LOG.error(\"Failed to set PID relock enabled. Error code: %s\", ERROR_CODES[retval])", "def set_inverted():\n inverted = request.params.get(\"inverted\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetInverted(pid, inverted)\n if retval != 0:\n LOG.error(\"Failed to set PID feedback sign. 
Error code: %s\", ERROR_CODES[retval])", "def setTimingProcessName(self, string: str) -> None:\n ...", "def _set_task(self, task_idx):\n self.task_idx = task_idx", "def kill(pid):\n p = psutil.Process(pid)\n\n try:\n p.kill()\n except Exception:\n pass", "def pre_run(self):\n\n super(PidfileApp, self).pre_run()\n\n if self.verbose > 1:\n log.info(_(\"Creating pidfile %r ...\"), self.pidfile.filename)\n\n try:\n self.pidfile.create()\n except PidFileInUseError as e:\n self.handle_error(str(e), '', False)\n self.exit(2)\n except PidFileError as e:\n self.handle_error(str(e), '', False)\n self.exit(3)\n except Exception as e:\n self.handle_error(str(e), e.__class__.__name__, True)\n self.exit(5)", "def perform_arg_parser(self):\n\n super(PidfileApp, self).perform_arg_parser()\n\n pidfile = getattr(self.args, 'pidfile', None)\n if pidfile and (pidfile != self._default_pidfilename):\n log.debug(\n _(\"Setting pidfile to %r by commandline parameter.\"), pidfile)\n self._pidfilename = pidfile\n\n self._simulate = getattr(self.args, 'simulate', False)", "def pkill(process_name):\n try:\n killed = os.system('taskkill /im ' + process_name)\n except Exception:\n killed = 0\n return killed", "def _on_parent_process_kill(self):", "def perform_config(self):\n\n super(PidfileApp, self).perform_config()\n\n if ('general' in self.cfg and 'pidfile' in self.cfg['general']):\n # Not set by commandline, but set in configuration\n pidfile = to_str_or_bust(self.cfg['general']['pidfile'])\n if pidfile and (pidfile != self._default_pidfilename):\n log.debug(\n _(\"Setting pidfile to %r by configuration.\"), pidfile)\n self._pidfilename = pidfile", "def writePIDFile(self):\n pidFilePath = self.options.get(RunnerOptions.pidFilePath)\n if pidFilePath is not None:\n pid = getpid()\n pidFilePath.setContent(u\"{}\\n\".format(pid).encode(\"utf-8\"))", "def test_mismatchedPID(self):\n fakeEnvironment = self.initializeEnvironment(3, os.getpid() + 1)\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())", "def exit_auto():\n # def_trackback()\n pid = os.getpid()\n logging.debug('Pid auto :' , pid)\n call(r'taskkill /pid %s /f' % pid)", "def set_regvar_cmt(*args):\n return _ida_frame.set_regvar_cmt(*args)", "def set_prompt(self, ps1=''):\n if not ps1:\n task = self.db.get_active_task()\n if task:\n ps1 = ('%s#%s' % (task['tname'], task['pname'])).encode('utf8')\n else:\n ps1 = self.bloody_prompt\n self.prompt = ('(%s)> ' % ps1)", "def test_different_pid(self):\n testcase = self.root.find('./testcase[@classname=\"support.PassingTest\"]')\n systemout = testcase.find('system-out')\n test_pid = systemout.text.replace('pid: ', '').replace('\\n', '')\n self.assertNotEqual(str(os.getpid()), test_pid)", "def write_PID(self):\n path = os.path.join(self.user_directory, \"tmp\")\n if not os.path.exists(path):\n os.makedirs(path)\n\n pid = os.getpid()\n pid_path = os.path.join(path, \"pid\")\n with open(pid_path, \"w\") as pid_file:\n pid_file.write(str(pid))", "def set_int_auto():\n int_auto = request.params.get(\"int_auto\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetResetWhenRailed(pid, int_auto)\n if retval != 0:\n LOG.error(\"Failed to set PID automatical integrator reset. 
Error code: %s\",\n ERROR_CODES[retval])", "def write_pid_file(pid_file):\n\tif pid_file is not None:\n\t\tassert ltrace(TRACE_PROCESS, u'| write_pid_file({0}) ↣ {1}',\n\t\t\t\t\t\t\t\t(ST_NAME, pid_file), (ST_UGID, os.getpid()))\n\n\t\twith open(pid_file, 'w') as f:\n\t\t\tf.write(\"%s\\n\" % os.getpid())", "def startd(pidfile=''):\n # do the UNIX double-fork magic, see Stevens' \"Advanced \n # Programming in the UNIX Environment\" for details (ISBN 0201563177)\n # http://code.activestate.com/recipes/66012/\n # CHITS SMS code from Bowei Du\n try:\n pid = os.fork()\n if pid > 0:\n log.info(\"Daemon PID %d\" % pid)\n sys.exit(0)\n except OSError, e:\n log.error(\"fork #1 failed: %d (%s)\" % (e.errno, e.strerror))\n sys.exit(1)\n\n os.chdir(\"/\")\n os.setsid()\n # os.umask(0)\n\n try:\n pid = os.fork()\n if pid > 0:\n # exit from second parent, print eventual PID before\n log.info(\"Daemon PID %d\" % pid)\n sys.exit(0)\n except OSError, e:\n log.error(\"fork #2 failed: %d (%s)\" % (e.errno, e.strerror))\n sys.exit(1)\n \n pid = os.getpid()\n pidfile = os.path.basename(pidfile)\n pidfile = os.path.join(PATH, 'log', pidfile)\n if not os.path.exists(pidfile):\n raise ConfigError(\"%s not found\" % pidfile)\n pf = file(pidfile,'r+')\n pf.write(\"%s\\n\" % pid)\n pf.close()\n \n return pid", "def set_draft(number):\n cmds = [github_cli, 'pr', 'ready', str(number), '--undo']\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def writepid (processname = None, proc = None):\n processname = os.path.basename(processname)\n pidpath = os.path.join(pid_path,processname+\".pid\")\n\n if processname is not None and proc is not None:\n f = open (pidpath,\"a\")\n f.write(str(proc.pid)+'\\n')\n f.close()\n return True\n else:\n return False", "def penta_kills(self, penta_kills):\n\n self._penta_kills = penta_kills", "def pause(queue_id):\n r = update_queue_state(queue_id, 'pause')\n\n if r.status_code != 200:\n click.echo('Pause queue failed, please ensure input is correct.')\n else:\n click.echo(r.text)", "def kill(self):\n \n self.killSlavePids()", "def _DumpDebugPid(cls, log_level, pid):\n pid = str(pid)\n commands = (\n ('pstree', '-Apals', pid),\n ('lsof', '-p', pid),\n )\n for cmd in commands:\n cls._DebugRunCommand(cmd, debug_level=log_level, error_code_ok=True,\n log_output=True)\n\n stdin = '\\n'.join(['echo \\\\n>>> %s\\\\n\\n%s' % (x, x)\n for x in cls.GDB_COMMANDS])\n cmd = ('gdb', '--nx', '-q', '-p', pid, '-ex', 'set prompt',)\n cls._DebugRunCommand(cmd, debug_level=log_level, error_code_ok=True,\n log_output=True, input=stdin)", "def __init__(\n self, appname=None, pidfile=None, verbose=0, version=__version__,\n base_dir=None, use_stderr=False, initialized=False, usage=None,\n description=None, argparse_epilog=None, argparse_prefix_chars='-',\n env_prefix=None, cfg_dir=None, cfg_stem=None, cfg_encoding='utf8',\n cfg_spec=None, hide_default_config=False, need_config_file=False):\n\n self.pidfile = None\n \"\"\"\n @ivar: after initialisation the pidfile object to handle it.\n @type: PidFile\n \"\"\"\n\n self._default_pidfilename = pidfile\n \"\"\"\n @ivar: a default filename for a pidfile\n @type: str\n \"\"\"\n\n self._pidfilename = None\n \"\"\"\n @ivar: the resulting filename of the pidfile after evaluating\n configuration and commandline parameters\n @type: str\n \"\"\"\n\n self._simulate = False\n \"\"\"\n @ivar: simulation mode, nothing is really done\n @type: bool\n \"\"\"\n\n super(PidfileApp, self).__init__(\n appname=appname,\n verbose=verbose,\n 
version=version,\n base_dir=base_dir,\n use_stderr=use_stderr,\n initialized=False,\n usage=usage,\n description=description,\n argparse_epilog=argparse_epilog,\n argparse_prefix_chars=argparse_prefix_chars,\n env_prefix=env_prefix,\n cfg_dir=cfg_dir,\n cfg_stem=cfg_stem,\n cfg_encoding=cfg_encoding,\n cfg_spec=cfg_spec,\n hide_default_config=hide_default_config,\n need_config_file=need_config_file,\n )\n\n if not self.pidfilename:\n self._pidfilename = self._default_pidfilename\n if not os.path.isabs(self.pidfilename):\n self._pidfilename = os.path.join(self.base_dir, self.pidfilename)\n if self.verbose > 3:\n log.debug(_(\"Using pidfile: %r.\"), self.pidfilename)\n\n self._simulate = getattr(self.args, 'simulate', False)" ]
[ "0.6045698", "0.6005458", "0.6005458", "0.5414311", "0.5351061", "0.5235025", "0.5235025", "0.52202576", "0.52202576", "0.51928914", "0.51871926", "0.50754386", "0.5040368", "0.5033574", "0.49666995", "0.49178597", "0.49152836", "0.49008197", "0.4899533", "0.48833144", "0.4882286", "0.48132563", "0.48048356", "0.47864658", "0.4761764", "0.47297344", "0.47267908", "0.4726245", "0.47245863", "0.4705036", "0.46885264", "0.46882886", "0.46874124", "0.46566698", "0.46333504", "0.46291012", "0.46291012", "0.46291012", "0.4626826", "0.46261036", "0.46258542", "0.4618352", "0.45872608", "0.4576748", "0.45721048", "0.45596704", "0.45564795", "0.45533928", "0.45533928", "0.45514536", "0.45503014", "0.45481554", "0.45472786", "0.45407298", "0.4524165", "0.45069894", "0.45065632", "0.4505799", "0.44918796", "0.44918364", "0.44862747", "0.4483997", "0.44601578", "0.44492656", "0.44492656", "0.44374046", "0.4436916", "0.44289973", "0.44231233", "0.44210237", "0.44161287", "0.44024345", "0.43937507", "0.4384866", "0.43737876", "0.4367133", "0.43599418", "0.4359625", "0.43487236", "0.43395448", "0.43343312", "0.43340242", "0.43298957", "0.43268597", "0.4326468", "0.43107623", "0.4310138", "0.4306344", "0.43056124", "0.42896444", "0.4285192", "0.42814136", "0.42798227", "0.4278707", "0.4274218", "0.42739174", "0.42552906", "0.42549288", "0.42439386", "0.42374167" ]
0.79088074
0
Infer class scores from the input image. This function defines the network's architecture.
def inference(self, image_rgb): s = image_rgb.get_shape().as_list() with tf.name_scope('image-preprocessing'): MEAN = [103.939, 116.779, 123.68] assert s[1:] == [227, 227, 3] red, green, blue = tf.split(image_rgb, 3, 3) bgr = tf.concat([ blue - MEAN[0], green - MEAN[1], red - MEAN[2], ], 3) # 1st Layer: Conv (w ReLu) -> Pool -> Lrn conv1 = conv(bgr, 11, 11, 96, 4, 4, padding='VALID', name='conv1') pool1 = max_pool(conv1, 3, 3, 2, 2, padding='VALID', name='pool1') norm1 = lrn(pool1, 2, 2e-05, 0.75, name='norm1') # 2nd Layer: Conv (w ReLu) -> Pool -> Lrn conv2 = conv(norm1, 5, 5, 256, 1, 1, name='conv2', groups=2) pool2 = max_pool(conv2, 3, 3, 2, 2, padding='VALID', name='pool2') norm2 = lrn(pool2, 2, 2e-05, 0.75, name='norm2') # 3rd Layer: Conv (w ReLu) conv3 = conv(norm2, 3, 3, 384, 1, 1, name='conv3') # 4th Layer: Conv (w ReLu) conv4 = conv(conv3, 3, 3, 384, 1, 1, name='conv4', groups=2) # 5th Layer: Conv (w ReLu) -> Pool conv5 = conv(conv4, 3, 3, 256, 1, 1, name='conv5', groups=2) pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5') # 6th Layer: Flatten -> FC (w ReLu) -> Dropout flattened = tf.reshape(pool5, [-1, 6*6*256]) fc6 = fc(flattened, 6*6*256, 4096, name='fc6') dropout6 = dropout(fc6, self.KEEP_PROB) # 7th Layer: FC (w ReLu) -> Dropout fc7 = fc(dropout6, 4096, 4096, name='fc7') dropout7 = dropout(fc7, self.KEEP_PROB) # 8th Layer: FC and return unscaled activations (for tf.nn.softmax_cross_entropy_with_logits) score_imagenet_classes = fc(dropout7, 4096, 1000, relu=False, name='fc8') # New score layer for the new task (network is shared up to this point) score_retrained = fc(dropout7, 4096, self.NUM_CLASSES, relu=False, name='fc8_new') return score_imagenet_classes, score_retrained
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def classify(img, c_model):\n #global class_graph\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions", "def apply(self, image):\n if isinstance(image, hyperread):\n image = image.image()\n\n reshape = False\n if len(image.shape) > 2:\n (n,m,k) = np.shape(image)\n image = np.reshape(image, (n*m, k))\n reshape = True\n\n if self.apply_preprocessing:\n image = self.preprocess_data_matrix(image)\n\n classes = self.random_forest.predict(image)\n\n if reshape:\n classes = np.reshape(classes, (n,m))\n\n if self.apply_postprocessing:\n classes = post_processing(classes, self.region_selection)\n\n return classes", "def get_classification(self, image):\n\n image_np_expanded = np.expand_dims(image, axis=0)\n\n # Perform network inference\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n if self.RUNNING_ON_CARLA == True:\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n if classes[i] == 10:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n # Extract image from best bounding box and pass through light classifier\n ymin, xmin, ymax, xmax = boxes[i]\n im_height, im_width, im_depth = image.shape\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n tf_image_cropped = image[int(top):int(bottom), int(left):int(right), :]\n\n PILImage = Image.fromarray(tf_image_cropped)\n resized_img = PILImage.resize((85, 256), Image.ANTIALIAS)\n image_np_resized = self.load_image_into_numpy_array(resized_img)\n x = np.expand_dims(image_np_resized, axis=0)\n x = np.vstack([x])\n\n #model = load_model('tf_classifier_1.h5')\n #model.compile(loss='categorical_crossentropy',\n # optimizer='adam',\n # metrics=['accuracy'])\n classes = self.keras_model.predict_classes(x, batch_size=1)\n print(classes)\n\n if classes[0] == 0:\n self.current_light = TrafficLight.GREEN\n elif classes[0] == 2:\n self.current_light = TrafficLight.YELLOW\n else:\n self.current_light = TrafficLight.RED\n\n break\n\n else:\n # Check the detections. If it has a good score\n # then set the current light to the detected label. The\n # first one is always the best (they are returned sorted \n # in score order).\n # Note that we have trained for 14 categories, including\n # left/right arrows etc. 
Here we are only looking for \n # standard red, yellow and green light and ignore others.\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > .05:\n classname = self.category_index[classes[i]]['name']\n print(classname, scores[i])\n\n if classname == 'Green':\n self.current_light = TrafficLight.GREEN\n elif classname == 'Yellow':\n self.current_light = TrafficLight.YELLOW\n elif classname == 'Red':\n self.current_light = TrafficLight.RED\n else:\n self.current_light = TrafficLight.UNKNOWN\n\n break\n\n return self.current_light", "def classify_images(net, images):\n prediction = net.predict(images) # predict takes any number of images, and formats them for the Caffe net automatically\n return prediction", "def get_classification(self, image):\n #TODO implement light color prediction\n \n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n start = datetime.now() #start = datetime.datetime.now() if import datetime\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand}) \n end = datetime.now() #end = datetime.datetime.now()\n c = end - start\n #rospy.logwarn(\"tl_classifier - Image predicted in: {0} seconds\".format(c.total_seconds()))\n #print(c.total_seconds())\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n print('tl_classifier - CLASSES: 1=Green, 2=Red, 3=Yellow, 4=Unknown: ', classes[0])\n #print('tl_classifier - SCORES: ', scores[0])\n #print('tl_classifier - TrafficLight.GREEN: ', TrafficLight.GREEN) = 2 CLASSES: 1\n #print('tl_classifier - TrafficLight.RED: ', TrafficLight.RED) = 0 CLASSES: 2\n #print('tl_classifier - TrafficLight.YELLOW: ', TrafficLight.YELLOW) = 1 CLASSES: 3\n #print('tl_classifier - TrafficLight.UNKNOWN: ', TrafficLight.UNKNOWN) = 4 CLASSES: 4\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n print('GREEN')\n return TrafficLight.GREEN\n elif classes[0] == 2:\n print('RED')\n return TrafficLight.RED\n elif classes[0] == 3:\n print('YELLOW')\n return TrafficLight.YELLOW\n else:\n rospy.logwarn(\"Light: UNKNOWN\")\n\n \n return TrafficLight.UNKNOWN", "def infer(self, image_path: str = None):\n # Class labels\n labels = [\n \"airplane\",\n \"automobile\",\n \"bird\",\n \"cat\",\n \"deer\",\n \"dog\",\n \"frog\",\n \"horse\",\n \"ship\",\n \"truck\",\n ]\n # Retrieve the image\n img = np.array(\n tf.keras.preprocessing.image.load_img(image_path, color_mode=\"rgb\")\n )\n # Convert it into model suitable form\n img = 2.0 * tf.cast(tf.expand_dims(img, axis=0), tf.float32) / 255.0 - 1.0\n\n # Prepare the model object\n if self.train_mode == \"combined\":\n model = KM.Model(\n inputs=self.combined.input,\n outputs=self.combined.get_layer(\"logits\").output,\n )\n elif self.train_mode == \"classifier\":\n model = KM.Model(\n inputs=self.classifier.input,\n outputs=self.classifier.get_layer(\"logits\").output,\n )\n\n # Run through the model\n pred_logits = model.predict(img)\n\n # Split the logits from different levels\n pred_logits = tf.split(\n tf.expand_dims(pred_logits, axis=-1),\n num_or_size_splits=self.n_blocks,\n axis=1,\n )\n # Predicted label by taking an elementwise maximum across all layers\n pred_logits = tf.reduce_max(tf.concat(pred_logits, axis=2), axis=2)\n # Get pred labels\n pred_labels = tf.argmax(pred_logits, axis=-1)\n label = labels[pred_labels[0]]\n upper = \"_\" * (31 + len(label))\n lower = \"-\" * (31 + len(label))\n 
print(f\"{upper}\\nThis image belongs to '{label}' class.\\n{lower}\")", "def get_classification(self, image):\n\n # Convert image to PIL RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # add a fourth batch dimension to array\n image = np.expand_dims(image, axis=0)\n\n ## Predict images class\n if image.shape==(1, self.img_height, self.img_width, self.img_channels):\n y_pred = self.model.predict(image)\n else:\n rospy.logwarn(\"tl_classifier: Wrong image shape: {},{},{},{}\".format(image.shape[0],image.shape[1],image.shape[2],image.shape[3]))\n return TrafficLight.UNKNOWN\n\n # Filter predictions\n confidence_threshold = 0.7\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n\n # Output predicted classes and scores\n #rospy.loginfo(\"tl_classifier: class conf xmin ymin xmax ymax\")\n \n # Filter classes prediction\n tl_pred_classes = y_pred_thresh[0][:,0]\n tl_pred_scores = y_pred_thresh[0][:,1]\n # Find classes that contains tl's\n tl_pred_classes = [cl for cl in tl_pred_classes if 1<=cl<=3]\n\n\n # Test light state (if prediction is not empty)\n if len(tl_pred_classes) > 0:\n if (tl_pred_classes[0]==1):\n tl_return = TrafficLight.GREEN\n rospy.loginfo(\"tl_classifier: Green detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==2):\n tl_return = TrafficLight.YELLOW\n rospy.loginfo(\"tl_classifier: Yellow detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==3):\n tl_return = TrafficLight.RED\n rospy.loginfo(\"tl_classifier: Red detected, score {:.2f}\".format(tl_pred_scores[0]))\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Other class detected!\")\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Unknown detected!\")\n\n\n return tl_return", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = 
float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)", "def get_classification(self, image):\n start_time = time.time()\n # Runs inference on one image on the loaded graph\n with self.inference_graph.as_default():\n # Image is expanded to 4 dims - 1st dim batch size (=1)\n image_4d = np.expand_dims(image, axis=0)\n (boxes, scores, classes, num_det) = self.sess.run([self.d_boxes, self.d_scores, self.d_classes, self.num_d], feed_dict = {self.image_tensor: image_4d})\n\n end_time = time.time()\n\n rospy.logdebug(\"Time for classification: {0}s\".format(end_time - start_time))\n\n # Inference returns a (fixed) total of self.num_d detections - even those with low probabilities\n r_boxes = []\n r_scores = []\n r_classes = []\n idx = 0\n\n # If the highest score is below detection probability, there is no traffic light visible or not clear enough, return unknown\n if scores[0][0] < DETECTION_LIMIT:\n rospy.logdebug(\"No traffic light detected: UNKNOWN\")\n return TrafficLight.UNKNOWN\n\n # Keep all results above probability of DETECTION_LIMIT\n while (scores[0][idx] > DETECTION_LIMIT):\n r_scores.append(scores[0][idx])\n r_boxes.append(boxes[0][idx])\n r_classes.append(classes[0][idx])\n idx+=1\n\n # Classes for red, yellow and green lights\n red = 1\n yellow = 2\n green = 3\n yellow_or_not = False\n\n img_shape = image.shape\n height = img_shape[0]\n width = img_shape[1]\n\n # In simulator mode, feed each detection box to a 
color detector\n # and return the classification result\n if SIMULATOR_MODE:\n \n rectangles = [] # List of rectangles to cut out regions\n\n # Loops through all boundary boxes with found traffic lights and expands them to full image size (0..1 to 0..image_height and width)\n for idx, box in enumerate(r_boxes):\n box_norm = [int(box[0]*height), int(box[1]*width), int(box[2]*height), int(box[3]*width)]\n rectangles.append(box_norm)\n\n # Loops through all the boundary boxes and detects their dominant light color\n for rect in rectangles:\n crop_image = image[rect[0]:rect[2], rect[1]:rect[3]]\n classification = detect_color(crop_image)\n if classification == red:\n rospy.logdebug(\"Red traffic light detected\")\n return TrafficLight.RED\n elif classification == yellow:\n yellow_or_not = True\n\n if yellow_or_not:\n rospy.logdebug(\"Yellow traffic light detected\")\n return TrafficLight.YELLOW\n else:\n rospy.logdebug(\"Green traffic light detected\")\n return TrafficLight.GREEN\n\n # If not in simulator mode, use the detection result from the FRCNN classifier directly\n else:\n for classification in r_classes:\n if classification == red:\n rospy.logdebug(\"Red traffic light detected\")\n return TrafficLight.RED\n elif classification == yellow:\n yellow_or_not = True\n if yellow_or_not:\n rospy.logdebug(\"Yellow traffic light detected\")\n return TrafficLight.YELLOW\n else:\n rospy.logdebug(\"Green traffic light detected\")\n return TrafficLight.GREEN", "def predict(model, images):\n return model.predict_classes(images)", "def image_inference(self, model_name: str, input_data):\n exec_net, image_input, image_info_input, (n, c, h, w), postprocessor = self.model_loading.load_model(model_name)\n cap, visualizer, tracker, presenter = self.image_visualizer.visualizer(input_data,model_name)\n\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n # Resize the image to keep the same aspect ratio and to fit it to a window of a target size.\n scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])\n input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)\n\n input_image_size = input_image.shape[:2]\n input_image = np.pad(input_image, ((0, h - input_image_size[0]),\n (0, w - input_image_size[1]),\n (0, 0)),\n mode='constant', constant_values=0)\n # Change data layout from HWC to CHW.\n input_image = input_image.transpose((2, 0, 1))\n input_image = input_image.reshape((n, c, h, w)).astype(np.float32)\n input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)\n # Run the net.\n feed_dict = {image_input: input_image}\n if image_info_input:\n feed_dict[image_info_input] = input_image_info\n outputs = exec_net.infer(feed_dict)\n # Parse detection results of the current request\n scores, classes, boxes, masks = postprocessor(\n outputs, scale_x, scale_y, *frame.shape[:2], h, w, 0.5)\n os.remove(input_data.filename)\n class_labels = self.fetch_labels.get_labels(model_name)\n\n t = 0\n for key2 in [class_labels[i] for i in classes]:\n x1 = str(boxes[t][0])\n y1 = str(boxes[t][1])\n x2 = str(boxes[t][2])\n y2 = str(boxes[t][3])\n\n if key2 in self.prediction.keys():\n value_init = self.prediction.get(key2)\n self.prediction[key2] = x1, y1, x2, y2\n value = value_init, self.prediction.get(key2)\n self.prediction[key2] = value\n\n else:\n self.prediction[key2] = x1, y1, x2, y2\n\n t = t + 1\n\n with open('./final_json.json', 'w') as file:\n json.dump(self.prediction, file)\n\n with open('./final_json.json','r') as file:\n json_object = 
json.load(file)\n\n return json_object\n cv2.destroyAllWindows()\n cap.release()", "def get_classification(self, image):\n #TODO implement light color prediction\n max_idx = 4\n with self.detection_graph.as_default():\n with tf.Session(graph=self.detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n \n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n \n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n min_score_thresh = .50\n # find majority light state\n counter = [0, 0, 0, 0, 0]\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n counter[classes[i]] += 1\n for i in range(1, 5):\n if counter[i] > counter[max_idx]:\n max_idx = i\n return self.classmap[max_idx]", "def get_classification(self, image, wp = 0):\n\n cv2_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) \n input_image = np.expand_dims(cv2_image, axis=0)\n (boxes, scores, classes) = self.sess.run([self.boxes, self.scores, self.classes], \n feed_dict={self.image_tensor: input_image})\n\n prediction = 4\n min_score_thresh=.6\n sq_boxes = np.squeeze(boxes)\n sq_classes = np.squeeze(classes).astype(np.int32)\n sq_scores = np.squeeze(scores)\n\n for i in range(sq_boxes.shape[0]):\n if sq_scores is None or sq_scores[i] > min_score_thresh:\n prediction = sq_classes[i]\n min_score_thresh = sq_scores[i]\n print(\"Found traffic light: {i:%d prediction:%s pred_score:%.4f}\"%(i, prediction, sq_scores[i]))\n \n if prediction == 1:\n return TrafficLight.RED\n elif prediction == 2:\n return TrafficLight.YELLOW\n elif prediction == 3:\n return TrafficLight.GREEN\n return TrafficLight.UNKNOWN", "def infer_classes(png_fname):\n # 1 as speech\n \n from fastai.vision.image import open_image\n classes = model_classes.predict(open_image(png_fname))\n\n return classes", "def get_classification(self, image):\n\n rospy.logdebug('tl_classifier.get_classification() called')\n\n if self._detection_graph.as_default() == None:\n rospy.logerror('Error: self._detection_graph.as_default() is None')\n return TrafficLight.UNKNOWN\n\n # Preprocess the image\n if self.mode == \"SIM\":\n cv_image = self.bridge.imgmsg_to_cv2(image, \"bgr8\")\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n rospy.logdebug('Converting image from BGR to RGB in SIM mode')\n else:\n cv_image = self.bridge.imgmsg_to_cv2(image, \"bgr8\")\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n rospy.logdebug('Converting image from BGR to RGB in CALRA mode')\n pass\n\n with self._detection_graph.as_default():\n\n # Expand dimensions since the model expects\n # images to 
have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(cv_image, axis=0)\n image_tensor = self._detection_graph.get_tensor_by_name('image_tensor:0')\n # Get bounding boxes for each object detection\n boxes = self._detection_graph.get_tensor_by_name('detection_boxes:0')\n # Get confidence scores\n scores = self._detection_graph.get_tensor_by_name('detection_scores:0')\n classes = self._detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self._detection_graph.get_tensor_by_name('num_detections:0')\n\n rospy.logdebug('Starting image detection...')\n start = time.time()\n\n # Feed dictionary and start tensorflow session for detection\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n rospy.logdebug('Ending image detection...')\n end = time.time()\n rospy.logdebug('Time needed for detection in milliseconds: %s' , int(round((end-start)*1000,0)))\n\n\n # Finally process the detection results\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n # Annotate the image and publish as topic /tl_classifier/image_raw/compressed\n vis_util.visualize_boxes_and_labels_on_image_array(\n cv_image, boxes, classes, scores,\n self.category_index,\n use_normalized_coordinates=True,\n line_thickness=6)\n\n # Create CompressedIamge #\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', cv_image)[1]).tostring()\n # Publish new image\n self.publish_trafficlight_image.publish(msg)\n\n\n # loop through all bounding boxes which have been found\n for i in range(boxes.shape[0]):\n # only loop through bounding boxes which score is higher\n # than the minimal threshold MIN_DETECTION_PROB_THRESHOLD\n if scores is not None and scores[i] > MIN_DETECTION_PROB_THRESHOLD:\n class_name = self.category_index[classes[i]]['name']\n perceived_width_x = (boxes[i][3] - boxes[i][1])*800\n perceived_width_y = (boxes[i][2] - boxes[i][0])*600\n diagonal = math.sqrt(perceived_width_x*perceived_width_y)\n rospy.logdebug('TL_Classifier: Color = %s , Probability = %s' , class_name , round(scores[i],2))\n rospy.logdebug('TL_Classifier: Diagonal of Bounding box = %s' , round(diagonal,0))\n\n\n # immediately return the detection with the highest score\n # other detections are ignored\n if class_name == 'Red':\n return TrafficLight.RED\n elif class_name == 'Yellow':\n return TrafficLight.YELLOW\n elif class_name == 'Green':\n return TrafficLight.GREEN\n\n\n rospy.logdebug('No detection results found...')\n\n return TrafficLight.UNKNOWN", "def predict(image_data):\n PAYLOAD = {}\n PAYLOAD[\"timestamp\"] = str(datetime.now())\n PAYLOAD[\"inference-type\"] = \"image-classification\"\n PAYLOAD[\"inference-description\"] = \"Top {} predictions with score {} or above \".format(\n config_utils.MAX_NO_OF_RESULTS, config_utils.SCORE_THRESHOLD\n )\n PAYLOAD[\"inference-results\"] = []\n\n try:\n # Run DLR to perform inference with DLC optimized model\n model_output = dlr_model.run(image_data)\n config_utils.logger.info(\"pred shape: '{}'.\".format(model_output[0][0].shape)) \n probabilities = softmax(model_output[0][0])\n config_utils.logger.info(\"pred shape softmax: '{}'.\".format(probabilities.shape)) \n sort_classes_by_probability = argsort(probabilities)[::-1]\n\n config_utils.logger.info(\"pred classes: '{}'.\".format(sort_classes_by_probability[: 
config_utils.MAX_NO_OF_RESULTS])) \n\n for i in sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS]:\n if probabilities[i] >= config_utils.SCORE_THRESHOLD:\n result = {\"Label\": str(synset[i]), \"Score\": str(probabilities[i])}\n PAYLOAD[\"inference-results\"].append(result)\n\n config_utils.logger.info(dumps(PAYLOAD))\n\n if config_utils.TOPIC.strip() != \"\":\n ipc_utils.IPCUtils().publish_results_to_cloud(PAYLOAD)\n else:\n config_utils.logger.info(\"No topic set to publish the inference results to the cloud.\")\n\n except Exception as e:\n config_utils.logger.error(\"Exception occured during prediction: {}\".format(e))", "def get_classification(self, image):\n\n\tif 'session' in locals() and session is not None:\n \t print('Close interactive session')\n session.close()\n\n time_start = time.time()\n #TODO implement light color prediction\n #image_np = self.__preprocess_image(image)\n \timage_np = image \n \n \t# Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n time0 = time.time()\n\n # Actual detection.\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n time1 = time.time()\n\n output = self.__postprocessing_detected_box(scores[0], classes[0])\n rospy.loginfo('Time in seconds' + str(time1-time_start)+' Result:'+self.__traffic_id_to_name(output))\n return output", "def classify(neural_net, image_file):\n\timg = Image.open(image_file)\n\timg.load()\n\timg_array = np.asarray(img)\n\timg_array.shape = (1, 100, 100, 3)\n\n\tprediction = model.predict(img_array)[0][0]\n\treturn prediction", "def classify(\n self, image_path: Optional[str] = None, image_data: Optional[bytes] = None\n ):\n\n tensor = None\n if image_path:\n tensor = self.__load_image(image_path=image_path)\n elif image_data:\n tensor = self.__load_image_from_bytes(image_data=image_data)\n if tensor is None:\n raise Exception(\"Please provide image path or data of your image!\")\n\n output = self.model(self.__batch_data(tensor))\n predicted = torch.argmax(output)\n classes = constant.classes\n prediction_class = classes[int(predicted.item())]\n return prediction_class", "def inference(self, image, score_threshold=None):\n h, w, c = image.shape\n image_batch = np.expand_dims(image, axis=0)\n # get operators from graph\n image_tensor = self.graph.get_tensor_by_name('image_tensor:0')\n detection_boxes = self.graph.get_tensor_by_name('detection_boxes:0')\n detection_scores = self.graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.graph.get_tensor_by_name('num_detections:0')\n # run inference\n with self.graph.as_default():\n t0 = datetime.now()\n (boxes, scores, classes, num) = self.sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_batch})\n t1 = datetime.now()\n num = int(num)\n self._log_info('*TF Detection*: {}'.format(get_tdiff(t0, t1)))\n # post processing ...\n # purge useless dimension \n boxes, scores, classes = np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes)\n # take only valid results\n boxes, scores, classes = boxes[:num,:], scores[:num], classes[:num]\n # score threshold\n if score_threshold is None:\n score_threshold = self.score_threshold\n boxes = 
boxes[scores>score_threshold,:]\n classes = classes[scores>score_threshold]\n scores = scores[scores>score_threshold]\n num = scores.shape[0]\n self._log_info('{} objects found'.format(num))\n # x-y reorder\n boxes = boxes[:,np.array([1,0,3,2])]\n # transform from 0-1 to 0-w and 0-h\n boxes = np.multiply(boxes, np.array([w,h,w,h])).astype(np.int32)\n return boxes, scores, classes", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def vr_http_classify(self, img):\n\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def get_classification(self, image):\n # Run inference on image\n prediction = None\n prediction = inferOnImage(self.sess, self.model_logits, self.X, image)\n\n # Convert number into label just for debug\n prediction_label = None\n if prediction[0] == 0:\n prediction_label = \"RED\"\n elif prediction[0] == 1:\n prediction_label = \"GREEN\"\n elif prediction[0] == 2:\n prediction_label = \"NOLIGHT\"\n\n # Log the message\n rospy.loginfo(\"The label returned is %s\", prediction_label)\n\n # Return Unknown for now\n return TrafficLight.UNKNOWN", "def get_classification(self, image):\n # return TrafficLight.RED\n # TODO implement light color prediction\n # creating an image object \n img_np = np.array(image) \n\n # convert np array to tensor\n input_tensor = tf.convert_to_tensor(img_np)\n\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis, ...]\n\n\n detections = self.loaded(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n\n # detection_classes should be ints.\n detections_dict = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n\n\n # detection_classes should be ints.\n detections_dict['detection_classes'] = detections_dict['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n\n # DEBUG - can do it in a cleaner way :0\n tl_classes = {3: 'green', 2: 'red'}\n top_classes_prediction = list(detections_dict['detection_classes']+label_id_offset)[:5] \n #print(top_classes_prediction)\n for i in range(len(top_classes_prediction)):\n if top_classes_prediction[i] == 2:\n top_classes_prediction[i] = 'green'\n elif top_classes_prediction[i] == 3:\n top_classes_prediction[i] = 'red'\n\n\n #print(\"--------->\", image_path, \"<-----------\")\n #print( top_classes_prediction ) \n #print(detections_dict['detection_scores'][:5], '\\n' )\n\n # basic red tl logic\n if top_classes_prediction[0] == 'red' and detections_dict['detection_scores'][0] >= 0.60:\n #print(\"-------------> RED TRAFFIC LIGHT <----------------\\n\")\n self.current_light = TrafficLight.RED\n #rospy.logwarn( \"----------------- Taffic light is RED !!! -------------------- \" )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n else:\n #print(\"No red traffic is detected\\n\")\n self.current_light = TrafficLight.GREEN\n #rospy.logwarn( \"----------------- You're good to go !!! 
--------: {0} - {1} \".format(top_classes_prediction[0], detections_dict['detection_scores'][0]) )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n\n return self.current_light", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def get_classification(self, images):\n # 1. resize images to (x, 64, 64, 3)\n images_np = np.zeros((len(images), 64, 64, 3), dtype=np.float32)\n for i, image in enumerate(images):\n images_np[i] = ((scipy.misc.imresize(image, (64, 64), interp=\"bicubic\"))/255.) - 0.5\n\n # 2. 
do classification\n classifications = self.session.run(self.predictions_tensor, feed_dict={self.image_tensor: images_np, self.keep_prob_tensor: 1.})\n\n # 3. calculate state\n has_red = False\n has_yellow = False\n has_green = False\n\n for cl in classifications:\n has_red = has_red | (cl == RED_CLASS)\n has_yellow = has_yellow | (cl == YELLOW_CLASS)\n has_green = has_green | (cl == GREEN_CLASS)\n\n state = TrafficLight.RED if has_red else \\\n (TrafficLight.YELLOW if has_yellow else\n (TrafficLight.GREEN if has_green else\n TrafficLight.UNKNOWN))\n\n return state, classifications", "def get_classification(self, image):\n\tresult = TrafficLight.UNKNOWN\n\n\t\"\"\"Convert the image into the proper format\"\"\"\n#\timage_np = self.load_image_into_numpy_array(image)\n\tself.image_np_expanded = np.expand_dims(image, axis=0)\n\n\t\"\"\"Apply detection\"\"\"\n\t(boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: self.image_np_expanded})\n\n\t\"\"\"And check if a green/Red light has been found\"\"\"\n\tif( num > 0):\n\t #find the highest score in the scores list\n\t max_score_idx = np.squeeze(scores).argmax()\n\t # and get the class going with this score\n\t tf_result = np.squeeze(classes).astype(np.int32)[max_score_idx]\n\t # convert from the TF result to internal format\n\t if( tf_result == 1 ):\n\t\tresult = TrafficLight.GREEN\n\t elif( tf_result == 2 ):\n\t\tresult = TrafficLight.RED\n\t elif( tf_result == 3 ):\n\t\tresult = TrafficLight.YELLOW\n\n\treturn result", "def get_classification(self, image):\n if self.model is not None:\n im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n im = im.astype('float32')\n im = preprocess_input(im)\n im_array = np.asarray(im)\n transformed_im_array = im_array[None, :, :, :]\n with self.graph.as_default():\n preds = self.model.predict(transformed_im_array, batch_size=1)\n return np.argmax(preds[0])\n return TrafficLight.UNKNOWN", "def http_classify(self, req):\n \n if len(req.files) != 0:\n img = np.fromstring(req.files['file'].read(), np.uint8)\n else:\n img = np.fromstring(req.data, np.uint8)\n \n img = cv2.imdecode(img, cv2.IMREAD_UNCHANGED)\n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n return self.get_predictions(img)", "def get_classification(self, image):\n #TODO implement light color prediction\n #imgfile = str(rospy.Time.now().to_sec()) + '.jpg'\n #cv2.imwrite(imgfile, image)\n tlScore, tlClass = self.pipeline(image)\n rospy.logwarn(\"score = {0}\".format(tlScore))\n rospy.logwarn(\"class = {0}\".format(tlClass))\n #rospy.logwarn(\"classifier time = {0}\".format(time.time()))\n if (tlClass.size == 0 or tlScore.size == 0):\n #rospy.logwarn(\"light state = UNKNOWN\")\n return TrafficLight.UNKNOWN\n elif (tlClass[np.argmax(tlScore)] == 2):\n #rospy.logwarn(\"light state = RED\")\n return TrafficLight.RED\n elif (tlClass[np.argmax(tlScore)] == 1):\n #rospy.logwarn(\"light state = GREEN\")\n return TrafficLight.GREEN\n elif (tlClass[np.argmax(tlScore)] == 3):\n #rospy.logwarn(\"light state = YELLOW\")\n return TrafficLight.YELLOW\n else:\n #rospy.logwarn(\"light state = UNKNOWN\")\n return TrafficLight.UNKNOWN\n #return TrafficLight.RED", "def LeNet5_architecture(self, input_shape):\r\n\r\n # Convolution layer (C1) hyperparameters\r\n s1 = self.hparameters[\"s1\"]\r\n f1 = self.hparameters[\"f1\"]\r\n n1 = 
self.hparameters[\"n1\"]\r\n\r\n # Average pooling layer(S2) hyperparameters\r\n s2 = self.hparameters[\"s2\"]\r\n f2 = self.hparameters[\"f2\"]\r\n\r\n # Convolutional layer (C3) hyperparameters\r\n s3 = self.hparameters[\"s3\"]\r\n f3 = self.hparameters[\"f3\"]\r\n n3 = self.hparameters[\"n3\"]\r\n\r\n # Average pooling layers (S4) hyperparameters\r\n s4 = self.hparameters[\"s4\"]\r\n f4 = self.hparameters[\"f4\"]\r\n\r\n # Convolutional layer (C5) hyperparameters\r\n s5 = self.hparameters[\"s5\"]\r\n f5 = self.hparameters[\"f5\"]\r\n n5 = self.hparameters[\"n5\"]\r\n\r\n # Number of outputs\r\n num_classes = self.num_classes\r\n\r\n X_input = Input(input_shape)\r\n X = X_input\r\n\r\n # Convolution layer 1\r\n X = Conv2D(n1, (f1,f1), strides = (s1, s1), padding = 'valid', name = 'C1', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n # Average pooling\r\n X = AveragePooling2D(pool_size= (f2,f2), strides = (s2,s2), padding = 'valid', name = 'S2')(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Convolution layer 2\r\n X = Conv2D(n3, (f3,f3), strides = (s3, s3), padding = 'valid', name = 'C3', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n #Average pooling\r\n X = AveragePooling2D(pool_size= (f4,f4), strides = (s4,s4), padding = 'valid', name = 'S4')(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Convolutional layer 3\r\n X = Conv2D(n5, (f5,f5), strides = (s5, s5), padding = 'valid', name = 'C5', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n # Activation\r\n X = Activation('tanh')(X)\r\n # Flatten\r\n X = Flatten()(X)\r\n # Fully Connected layer\r\n X = Dense(num_classes, activation = 'softmax', name = 'FC', kernel_initializer = glorot_uniform(seed = 0))(X)\r\n\r\n #create model\r\n model = Model(inputs = X_input, outputs = X, name = 'LeNet5')\r\n\r\n return model", "def predict(self, sess, img_data):\n\n with sess.as_default():\n new_image = self.preprocess(img_data, self.input_shape)\n input_feed = self.create_input_feed(sess, new_image, img_data)\n output_fetch = self.create_output_fetch(sess)\n all_classes, all_scores, all_bboxes = sess.run(output_fetch, input_feed)\n\n return all_classes, all_scores, all_bboxes", "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def keras_inference(input_image, model_type, labels, return_image):\r\n # Loading the image\r\n img = image.load_img(input_image, target_size=(50, 50))\r\n # Converting the image to numpy array\r\n x = image.img_to_array(img) \r\n # convert 3D tensor to 4D tensor with shape (1, 512, 512, 3)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n image_to_predict = x.astype('float32')/255\r\n \r\n # image_to_plot = path_to_tensor(input_image)\r\n\r\n # model's weight for localization\r\n model = load_model(model_type)\r\n prediction = model.predict(image_to_predict)\r\n # print(\"X shape : \", x.shape)\r\n # prediction_final = \"Not_cancer: \" + str(np.round(prediction[0][0]*100, decimals = 2)) + \"%\" + \\\r\n # \" | Cancer: \" + str(np.round(prediction[0][1]*100, decimals = 2)) + \"%\"\r\n print(\"Prediction : \",prediction[0])\r\n print(\"Argmax : \", np.argmax(prediction[0]))\r\n confidence = np.max(prediction[0]) * 100\r\n classify = labeled_class[int(np.argmax(prediction[0]))]\r\n print(\"classify :\", classify)\r\n output = {\r\n \"label\": \"{}\".format(task),\r\n \"type\" : \"classification\",\r\n \"output\" : {\r\n \"confidence\" : \"{0:.2f}\".format(round(confidence,2)),\r\n \"results\" : 
classify,\r\n \"image\" : return_image\r\n }\r\n } \r\n \r\n return output", "def get_classification(self, image):\n time = 0\n with self.graph.as_default(): \n img_expand = np.expand_dims(image, axis=0)\n start = datetime.datetime.now()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand})\n end = datetime.datetime.now()\n time = end - start\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n #print('SCORES: ', scores[0])\n #print('CLASSES: ', classes[0])\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n print('Traffic Light: *** GREEN ***, Detection Speed: ', time.total_seconds())\n return TrafficLight.GREEN\n elif classes[0] == 2:\n print('Traffic Light: *** RED ***, Detection Speed: ', time.total_seconds())\n return TrafficLight.RED\n elif classes[0] == 3:\n print('Traffic Light: *** YELLOW ***, Detection Speed: ', time.total_seconds())\n return TrafficLight.YELLOW\n\n return TrafficLight.UNKNOWN", "def get_classification(self, image):\n run_network = True # flag to disable running network if desired\n if run_network is True:\n image_np_expanded = np.expand_dims(image, axis=0)\n\n time0 = time.time()\n\n # Actual detection.\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores,\n self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n\n time1 = time.time()\n\n #print(\"Time in milliseconds\", (time1 - time0) * 1000)\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n \n #Add extra stuff here\n self.current_light = TrafficLight.UNKNOWN\n min_score_thresh = .50\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n\n class_name = self.category_index[classes[i]]['name']\n # class_id = self.category_index[classes[i]]['id'] # if needed\n\n #print('{}'.format(class_name))\n\n # Traffic light thing\n self.current_light = TrafficLight.UNKNOWN\n\n if class_name == 'Red':\n self.current_light = TrafficLight.RED\n elif class_name == 'Green':\n self.current_light = TrafficLight.GREEN\n elif class_name == 'Yellow':\n self.current_light = TrafficLight.YELLOW\n \n '''if self.current_light == TrafficLight.RED:\n print('RED')\n elif self.current_light == TrafficLight.GREEN:\n print('GREEN')\n elif self.current_light == TrafficLight.YELLOW:\n print('YELLOW')\n else:\n print('NO_LIGHT')'''\n\n return self.current_light", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = 
cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def get_classification(self, image):\n # Detect bounding boxes\n box_coords, _ = self.tld.predict(image)\n \n if len(box_coords) == 0:\n rospy.loginfo('No boxes detected')\n return TrafficLight.UNKNOWN\n \n # Identify light state\n num_detected = [0] * 3 # count how many each light detected in case not all boxes agree\n \n for box in box_coords:\n x1 = int(box[0])\n y1 = int(box[1])\n x2 = int(box[2])\n y2 = int(box[3])\n\n tl_img = image[x1:x2,y1:y2]\n dsize = (15, 30)\n tl_img = cv2.resize(tl_img, dsize)\n\n image_array = np.asarray(tl_img)\n\n with self.graph.as_default():\n labels = self.simulator_model.predict(image_array[None,:,:,:])\n predict = np.argmax(labels)\n \n num_detected[predict] += 1\n \n predict = num_detected.index(max(num_detected))\n rospy.loginfo('Each light detected (%d,%d,%d) times. 
'%(num_detected[0],num_detected[1],num_detected[2]))\n rospy.loginfo('Predicted state: %d.'%predict)\n\n return predict", "def get_classification(self, image):\n \n img = cv2.resize(src=image, dsize=(IN_IMAGE_HEIGHT,IN_IMAGE_WIDTH))\n img = img.astype(float)\n img = img / 255.0\n\n img = img[np.newaxis,:,:,:]\n\n with self.graph.as_default():\n predictions = self.model.predict(img)\n predicted_cat = np.argmax(predictions,axis=1)\n\n light = predicted_cat[0]\n# rospy.logwarn(\"Predicted = %i \", light)\n if(light==0):\n return TrafficLight.GREEN\n elif(light==1):\n return TrafficLight.YELLOW\n elif(light==2):\n return TrafficLight.RED\n return TrafficLight.UNKNOWN", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def get_classification(self, cv2_image):\n def get_green_mask(img_hsv):\n lower_green = np.array([40, 10, 10])\n upper_green = np.array([90, 255, 255])\n mask = cv2.inRange(img_hsv, lower_green, upper_green)\n return mask\n\n def get_red_mask(img_hsv):\n # red lower mask (0-10)\n lower_red = np.array([20, 1, 150])\n upper_red = np.array([30, 120, 255])\n mask0 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # Red upper mask\n lower_red = np.array([170, 50, 50])\n upper_red = np.array([180, 255, 255])\n mask1 = cv2.inRange(img_hsv, lower_red, upper_red)\n\n # join my masks\n mask = mask0 + mask1\n return mask\n\n def get_traffic_light_color(cv2_image):\n # Convert BGR to HSV\n img_hsv = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2HSV)\n height, width, _ = img_hsv.shape\n\n green_mask = get_green_mask(img_hsv)\n red_mask = get_red_mask(img_hsv)\n\n dico = {\n TrafficLight.RED: np.count_nonzero(red_mask[0:int(height / 3), :]),\n TrafficLight.YELLOW: np.count_nonzero(red_mask[int(height / 3):int(height * 2 / 3), :]),\n TrafficLight.GREEN: np.count_nonzero(green_mask[int(height * 2 / 3):height, :])\n }\n\n v = list(dico.values())\n k = list(dico.keys())\n return k[v.index(max(v))]\n\n output_dict = self.run_inference_for_single_image(cv2_image)\n traffic_light_image = self.get_traffic_light(cv2_image, output_dict)\n\n # no traffic light found\n if traffic_light_image is None:\n return TrafficLight.UNKNOWN\n\n return get_traffic_light_color(traffic_light_image)", "def get_classification(self, image):\n if self.model_type == 'tf':\n return self.run_tf_classifier(image)\n elif self.model_type == 'keras':\n return self.run_keras_classifier(image)\n else:\n return TrafficLight.UNKNOWN", "def class_imgs(list_img):\n numberimg = len(list_img)\n resize(net, numberimg, cursize)\n i = 0\n for img in list_img:\n image = caffe.io.load_image(img)\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[i] = transformed_image\n i = i + 1\n\n output = net.forward()\n\n results = []\n for n in range(0, numberimg):\n themax = output['prob'][n].argmax()\n results.append({'filename':list_img[n], 'class': themax, 'prob': output['prob'][n].tolist()})\n\n return results", "def get_classification(self, image):\n #TODO implement light color prediction\n \"\"\" example code\n if result == 0:\n state = TrafficLight.GREEN\n else\n state = TrafficLight.RED\n \"\"\"\n with self.detection_graph.as_default():\n boxes, scores, classes, num_detections = self.sess.run([self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections], feed_dict={self.image_tensor: np.expand_dims(image, axis=0)})\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes)\n 
max_score_idx = np.argmax(scores)\n result = classes[max_score_idx]\n if result == 1:\n print('RED Light')\n return TrafficLight.RED\n elif result == 2:\n print('Yellow Light')\n return TrafficLight.YELLOW\n elif result == 3:\n print('Green Light')\n return TrafficLight.GREEN\n return TrafficLight.UNKNOWN", "def classify_image(image):\n image_path = image.filename\n image_data = np.array(Image.open(image.stream))\n image_data = skimage.img_as_float(image_data).astype(np.float2)\n with classifier_lock:\n classification = classifier.predict([image_data])[0]\n return {\"suggested_tags\": predicted_tags(classification),\n \"classification_vector\": classification,\n \"image_url\": image_path}", "def get_classification(self, image):\n\n imrs = cv2.resize(image, (64, 64)) \n imrs = imrs.astype(float)\n imrs = imrs / 255.0\n \n imrs = imrs[newaxis, :, :, :]\n\n with self.graph.as_default():\n preds = self.model.predict(imrs)\n \n predicted_class = np.argmax(preds, axis=1)\n\n choices = {0: TrafficLight.RED,\n 1: TrafficLight.YELLOW,\n 2: TrafficLight.GREEN,\n 3: TrafficLight.UNKNOWN}\n return choices.get(predicted_class[0], TrafficLight.GREEN)", "def predict(self, images, batch_size):\n pass", "def main():\n # initialize the class labels and set the seed of the pseudorandom\n # number generator so we can reproduce our results\n labels = [\"dog\", \"cat\", \"panda\"]\n np.random.seed(1)\n\n # be * learned * by our model, but for the sake of this example, let's use random values\n W = np.random.randn(3, 3072)\n b = np.random.randn(3)\n\n # load our example image, resize it, and then flatten it into our\n # \"feature vector\" representation\n orig = cv2.imread(\"beagle.png\")\n image = cv2.resize(orig, (32, 32)).flatten()\n\n # compute the output scores by taking the dot product between the\n # weight matrix and image pixels, followed by adding in the b\n scores = W.dot(image) + b\n\n # loop over the scores + labels and display them\n for (label, score) in zip(labels, scores):\n print(\"[INFO] {}: {:.2f}\".format(label, score))\n\n # draw the label with the highest score on the image as our prediction\n cv2.putText(\n orig, \"Label: {}\".format(labels[np.argmax(scores)]), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2\n )\n\n # display our input image\n cv2.imshow(\"Image\", orig)\n cv2.waitKey(0)", "def detect(self, images, verbose=0):\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results", "def get_classification(self, image):\n # Image pre-processing pipleine\n img = np.float32(image)\n img = preprocess_input(img)\n img = cv2.resize(img, (299, 299))\n img = np.expand_dims(img, 0)\n # Execute model's predictions - return probability value for each of 4 classes\n probs = self.model.predict(img)[0]\n # get class with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident about the prediction\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n # Swap label values as model was trained with different label values\n if g_x == 2:\n prediction = 0 # Red\n elif g_x == 0:\n prediction = 2 # Green\n elif g_x == 3:\n prediction = 1 # Yellow\n else:\n prediction = 3 # No light\n\n # Log the message\n rospy.loginfo(\"The label returned is %d\", prediction)\n\n # Return the light state corresponding to the index\n return prediction", "def classify_image(img_path: str, model=None, pretrained_state_path: str = None):\n if model is None:\n if pretrained_state_path is None:\n model = models.vgg16(pretrained=True)\n else:\n state_dict = torch.load(pretrained_state_path)\n model = models.vgg16()\n model.load_state_dict(state_dict)\n img = preprocess_image(img_path)\n output = model(img)\n # Getting the max of the soft max layer.\n prediction = output.data.numpy().argmax()\n return labels[prediction]", "def resnet101_classifier(num_rois, num_classes, base_model = None, weight_regularizer=None, bias_regularizer=None):\n roi_input = Input(shape=(None, 4), name='roi_input')\n\n pooling_input = base_model.output if base_model else Input(shape=(None, None, FINAL_CONV_FILTERS))\n model_input = base_model.input if base_model else pooling_input\n resize_out = RoiResizeConv(POOLING_REGIONS, num_rois)([pooling_input, roi_input])\n\n out = td_conv_block(resize_out, 3, [512, 512, 2048], stage=5, block='a', strides=(1,1),\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n td_input_shape=(num_rois, POOLING_REGIONS, POOLING_REGIONS, 1024),\n use_conv_bias=False, separate_scale=True)\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='b',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n out = td_identity_block(out, 3, [512, 512, 2048], stage=5, block='c',\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n out = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(out)\n\n out = 
TimeDistributed(Flatten(name='flatten'))(out)\n\n gaussian_initializer_cls = TruncatedNormal(stddev=0.01)\n gaussian_initializer_bbreg = TruncatedNormal(stddev=0.001)\n\n out_class = TimeDistributed(Dense(num_classes, activation='softmax',\n kernel_initializer=gaussian_initializer_cls,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_class_{}'.format(num_classes))(out)\n out_reg = TimeDistributed(Dense(4 * (num_classes - 1), activation='linear',\n kernel_initializer=gaussian_initializer_bbreg,\n kernel_regularizer=weight_regularizer,\n bias_regularizer=bias_regularizer\n ),\n name='dense_reg_{}'.format(num_classes))(out)\n\n cls_model = Model(inputs=[model_input, roi_input], outputs=[out_class, out_reg])\n\n this_dir = os.path.dirname(__file__)\n weights_path = os.path.join(this_dir, '../models/resnet101_weights_tf.h5')\n cls_model.load_weights(weights_path, by_name=True)\n\n return cls_model", "def get_classification(self, image):\n total = None\n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n start = datetime.datetime.now()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand})\n end = datetime.datetime.now()\n total = end - start\n\n # boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n rospy.logwarn(\"{}: signal={}, sec={}, scores={}, classes={}\".format(\n self.__class__.__name__, self.category_index[classes[0]]['name'], total.total_seconds(), scores[0], classes[0]))\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n return TrafficLight.GREEN\n elif classes[0] == 2:\n return TrafficLight.RED\n elif classes[0] == 3:\n return TrafficLight.YELLOW\n\n return TrafficLight.UNKNOWN", "def forward(self, train_imgs, test_imgs, train_bb, test_bb, test_win, test_bb_inwin, *args, **kwargs):\n\n assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'\n # Extract backbone features, OrderedDict, layer2 [30, 512, 36, 36] layer3 [30, 1024, 18, 18]\n train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))\n test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))\n\n test_win = test_win.reshape(-1, 4)\n\n # Classification features\n train_feat_clf = self.get_backbone_clf_feat(train_feat)\n test_feat_clf = self.get_backbone_clf_feat(test_feat)\n\n # Run classifier module, train_skfeat.dim()=5, test_skfeat.dim()=5\n target_scores, train_skfeat, test_skfeat, channel_importance = self.classifier(train_feat_clf, test_feat_clf,\n train_bb, *args, **kwargs)\n\n train_feat_se, test_feat_se = self.adjust_skfeat(train_skfeat, test_skfeat, channel_importance)\n # Using the same feature with the classifier to estimate the target state\n bboxes, cls = self.state_estimator(train_feat_se, test_feat_se,\n train_bb, test_win, test_bb_inwin, target_scores)\n\n return target_scores, bboxes, cls", "def trainNet():", "def classify(self, model_type='random_forest', version=None):\n if model_type == 'random_forest':\n if self.type in ['Composite Image', 'Classified Image']:\n raise ValueError(f'Unable to perform {model_type} classification on a {self.type}.')\n url = self.server + '/recent-tiles-classifier'\n params = {'img_id': self.attributes.get('provider')}\n r = requests.get(url, params=params)\n if r.status_code == 200:\n classified_tiles = 
r.json().get('data').get('attributes').get('url')\n tmp = {'instrument': self.instrument,\n 'date_time': self.date_time,\n 'cloud_score': self.cloud_score,\n 'source': self.source,\n 'band_viz': None,\n 'ring': self.ring,\n 'server': self.server,\n 'thumb_url': self.thumb_url,\n 'tile_url': classified_tiles,\n 'type': 'Classified Image',\n 'bbox': self.bbox,\n 'np_array_bounds': self.np_array_bounds\n }\n return Image(**tmp)\n else:\n raise ValueError(f'Classification failed ({r.status_code} response): {r.json()}')\n return None\n if model_type in ['segnet', 'deepvel']: #and self.type == 'Composite Image':\n if self.type in ['Classified Image']:\n raise ValueError(f\"Unable to perform {model_type} classification on a {self.type}.\")\n payload = {'thumb_url': self.thumb_url,\n 'model_name': 'deepvel',\n 'model_version': None}\n url = f'https://us-central1-skydipper-196010.cloudfunctions.net/classify'\n headers = {'Content-Type': 'application/json'}\n r = requests.post(url, data=json.dumps(payload), headers=headers)\n if r.status_code == 200:\n image = np.array(r.json().get('output'), dtype=np.uint8)\n hash_code = random.getrandbits(128)\n thumb_path = f\"./{str(hash_code)[0:5]}.png\"\n p = png.from_array(image, mode='RGB').save(thumb_path)\n tmp = {'instrument': self.instrument,\n 'date_time': self.date_time,\n 'cloud_score': self.cloud_score,\n 'source': self.source,\n 'band_viz': None,\n 'ring': self.ring,\n 'server': self.server,\n 'thumb_url': thumb_path,\n 'tile_url': None,\n 'type': 'Classified Image',\n 'bbox': self.bbox,\n 'np_array': image,\n 'np_array_bounds': self.np_array_bounds\n }\n return Image(**tmp)\n else:\n raise ValueError(f\"Classification service responded with {r.status_code}: {r.url}\")\n else:\n raise ValueError(f\"Model type {model_type} not reccognised. type property should be one of 'random_forest', 'segnet', or 'deepvel'\")", "def predict_classification_net(X_test, image_name):\n\t# Load training data mean \n\tmeans = np.load(PATH + 'Datasets/means_classification.npy')\n\t# Zero center\n\tX_test -= means\n\t# Create model\n\tmodel = build_classif_net()\n\t# Load weights\n\tmodel.load_weights(PATH + 'Weights/weights_classification_net.hdf5')\n\t# Compile model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])\n\t# Predict model\n\tscores = model.predict(X_test)\n\t# Get indexes of the windows labeled as sealions (0 because sealions are [1 0])\n\tprediction = np.argmax(scores, axis=1)\n\n\tnp.save(PATH + 'Results/classification_'+ image_name + '.npy', prediction)\n\treturn prediction", "def detection(self, model_infos, trained_images=None):\n # Index of the class in the list is its ID. 
For example, to get ID of\n class_names = ['BG', 'red_s', 'red_m', 'red_l', 'yellow_s', 'yellow_m', 'yellow_l', 'green_s', 'green_m',\n 'green_l', 'blue_s', 'blue_m', 'blue_l', 'orange_s', 'orange_m', 'orange_l']\n config = ShapesConfig()\n detect_model = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config, model_info=model_infos)\n # Load weights trained on current model\n cur_model_path = os.path.join(model_infos[0], model_infos[1]+'.h5')\n cur_model_weights = os.path.join(MODEL_DIR, cur_model_path)\n detect_model.load_weights(cur_model_weights, by_name=True)\n # Traverse all the packages(the pool)\n result_of_detection = {}\n for package in self.images_pool:\n image_dir = os.path.join(DATA_DIR, package)\n images_in_package = os.listdir(image_dir)\n # import ground truth to check out the detection result\n instance_nums_of_images = self.count_instances_in_images(package)\n for img in images_in_package:\n # Skip detection of those images that already used for training\n if trained_images:\n if img in trained_images:\n continue\n image = skimage.io.imread(os.path.join(image_dir, img), as_gray=False)\n # Run detection\n results = detect_model.detect([image], verbose=0)\n r = results[0]\n \"\"\"\n # average entropy model\n total_entropy = 0\n for prob in r['scores']:\n total_entropy -= prob * math.log2(prob) + (1 - prob) * math.log2(1 - prob)\n result_of_detection[img] = total_entropy / len(r['scores']) if r['scores'] != [] else total_entropy\n \"\"\"\n # use dict to save the info of the detected instances of each images\n # min detection model\n\n gt_instances = instance_nums_of_images[img.split('.')[0]]\n result_of_detection[img] = abs(len(r['scores']) - gt_instances)\n\n # print(result_of_detection)\n print(\"+++++++detection finished\")\n del detect_model\n del config\n return result_of_detection", "def train_classifier(images_path):\n car_imgs = get_images(images_path + '/vehicles/')\n non_car_imgs = get_images(images_path + '/non-vehicles/')\n\n print('Computing car features')\n car_features = extract_features(car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(car_features))\n\n print('Computing non-car features')\n non_car_features = extract_features(non_car_imgs,\n color_space=COLOR_SPACE,\n spatial_size=SPATIAL_SIZE,\n hist_bins=HIST_BINS,\n orient=ORIENT,\n pix_per_cell=PIX_PER_CELL,\n cell_per_block=CELL_PER_BLOCK,\n hog_channel=HOG_CHANNEL,\n spatial_feat=SPATIAL_FEAT,\n hist_feat=HIST_FEAT,\n hog_feat=HOG_FEAT)\n print(len(non_car_features))\n \n X = np.vstack((car_features, non_car_features)).astype(np.float64) \n print('X shape: {}'.format(X.shape))\n # Fit a per-column scaler\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features))))\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n # Use a linear SVC \n svc = LinearSVC()\n # Check the training time for the SVC\n t=time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2-t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test 
Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n t=time.time()\n\n return svc, X_scaler", "def get_classification(self, image):\n # Light color prediction\n detections = self.run_detection(image)\n boxes, scores, classes = self.filter_boxes(0.6, detections)\n # Scores are ordered highest -> lowest\n if len(classes) > 0:\n if self.label_map[classes[0]] == 'red':\n # rospy.logwarn('Red Light: {}'.format(scores[0]))\n return TrafficLight.RED\n # rospy.logwarn('Proceeding')\n \n return TrafficLight.UNKNOWN", "def _build_model(self):\n\n with tf.variable_scope(\"Matchnet\", reuse=tf.AUTO_REUSE):\n # For determining the runtime shape\n x_shp = tf.shape(self.x_in)\n\n # -------------------- Network archintecture --------------------\n # Build graph\n print(\"Building Graph\")\n self.logits = build_graph(self.x_in, self.is_training, self.config)\n # ---------------------------------------------------------------\n\n # Turn into weights for each sample\n weights = tf.nn.relu(tf.tanh(self.logits))\n\n # Make input data (num_img_pair x num_corr x 4)\n xx = tf.transpose(tf.reshape(\n self.x_in, (x_shp[0], x_shp[2], 4)), (0, 2, 1))\n\n # Create the matrix to be used for the eight-point algorithm\n X = tf.transpose(tf.stack([\n xx[:, 2] * xx[:, 0], xx[:, 2] * xx[:, 1], xx[:, 2],\n xx[:, 3] * xx[:, 0], xx[:, 3] * xx[:, 1], xx[:, 3],\n xx[:, 0], xx[:, 1], tf.ones_like(xx[:, 0])\n ], axis=1), (0, 2, 1))\n print(\"X shape = {}\".format(X.shape))\n wX = tf.reshape(weights, (x_shp[0], x_shp[2], 1)) * X\n print(\"wX shape = {}\".format(wX.shape))\n XwX = tf.matmul(tf.transpose(X, (0, 2, 1)), wX)\n print(\"XwX shape = {}\".format(XwX.shape))\n\n # Recover essential matrix from self-adjoing eigen\n e, v = tf.self_adjoint_eig(XwX)\n self.e_hat = tf.reshape(v[:, :, 0], (x_shp[0], 9))\n # Make unit norm just in case\n self.e_hat /= tf.norm(self.e_hat, axis=1, keep_dims=True)", "def perform_image_classification_by_model(model, input_image, is_image_array = False, top_n_classes=5, show_info=True):\n global CLASS_INDEX\n results = pd.DataFrame.empty\n if show_info is True:\n print(\"Starting process to generate classes probabilities now..\")\n if is_image_array == True:\n preds = model.predict(input_image)\n else:\n input_image_array = imageassist.ImageUtils.convert_image_array(input_image)\n input_image_array = imageassist.ImageUtils.preprocess_image_array(input_image_array)\n preds = model.predict(input_image_array)\n\n if len(preds.shape) != 2:\n print(\"Error: The predictions values are not in the shape of tupple as (1,1000).\")\n return results\n\n if CLASS_INDEX is None:\n fpath = get_file('imagenet_class_index.json',\n IMAGENET_CLASS_JSON,\n cache_subdir='models')\n CLASS_INDEX = json.load(open(fpath))\n\n class_count = len(CLASS_INDEX)\n if class_count == 0:\n print(\"Error: There was some problem reading imagenet classes...\")\n return results\n\n if top_n_classes > class_count:\n top_n_classes=class_count\n\n cols = [\"ClassName\", \"ClassId\", \"Probability\"]\n results = pd.DataFrame(columns=cols, index=range(top_n_classes))\n\n if show_info is True:\n print(\"Classification completed, now generating prediction dataframe ..\")\n for pred in preds:\n # Getting top results in the prediction through index\n top_indices = pred.argsort()[-top_n_classes:][::-1]\n result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n result.sort(key=lambda x: x[2], reverse=True)\n for k in range(len(result)):\n results.loc[k].ClassName = 
result[k][1]\n results.loc[k].ClassId = result[k][0]\n results.loc[k].Probability = result[k][2]\n return results", "def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score", "def class_predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 3:\n return class_predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return class_predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return class_predict_3(trained_model, X_test, y_test, image_name)", "def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n\n # Read an image\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n img_data_jpg = tf.image.decode_jpeg(image_data) # Decode image\n img_data_jpg = tf.image.convert_image_dtype(img_data_jpg, dtype=tf.float32) # Convert uint8 to float32\n img_data_jpg = tf.image.resize_image_with_crop_or_pad(img_data_jpg,IMAGE_SIZE,IMAGE_SIZE)\n\n # Creates graph from saved GraphDef.\n create_graph()\n\n with tf.Session() as sess:\n image_data = img_data_jpg.eval().reshape(-1,IMAGE_SIZE,IMAGE_SIZE,CHANNEL)\n softmax_tensor = sess.graph.get_tensor_by_name('lg/InceptionV3/Predictions/Reshape_1:0')\n predictions = sess.run(softmax_tensor, {'lg/Placeholder:0': image_data})\n predictions = np.squeeze(predictions)\n print('predictions: ',predictions)\n # Read the labels from label.txt.\n label_path = os.path.join(FLAGS.model_dir, '/home/lg/projects/labels.txt')\n label = np.loadtxt(fname=label_path,dtype=str)\n\n top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n for node_id in top_k:\n label_string = label[node_id]\n score = predictions[node_id]\n print('%s (score = %.5f)' % (label_string, score))", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)", "def detect(self, images, verbose=0):\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(f\"Processing {len(images)} images\")\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. 
Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n # ************************* NOTE for 2 label dataset \n\n predict = self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n detections,mrcnn_mask = predict[:2]\n # Process detections\n results = []\n for i, image in enumerate(images):\n result = self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append(result)\n return results", "def get_classification(self, image):\n\tif image.size <= 0:\n\t rospy.loginfo('COLOR: unknown')\n return TrafficLight.UNKNOWN\n\n\timg_copy = np.copy(image)\n img_copy = cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)\n\n\timg_resize = cv2.resize(img_copy, (32, 32))\n\timg_resize = np.expand_dims(img_resize, axis=0).astype('float32')\n\n\timg_resize = (img_resize / 255.)\n\n\twith self.graph.as_default():\n\t predict = self.model.predict(img_resize)\n\n\t rospy.loginfo('Prediction: %s', predict)\n\n\t tl_color = self.sign_classes[np.argmax(predict)]\n\n\trospy.loginfo('COLOR: %s', tl_color)\n\tif tl_color == 'Red':\n\t return TrafficLight.RED\n\telif tl_color == 'Green':\n\t return TrafficLight.GREEN\n\telif tl_color == 'Yellow':\n\t return TrafficLight.YELLOW\n\t\n return TrafficLight.UNKNOWN", "def compute_classifications(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class_list(thumbnail_list, **config)\n elif config['classifier_algo'] in ['svm']:\n from wbia.algo.detect.svm import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n result_list = densenet.test(thumbpath_list, ibs=ibs, gid_list=gid_list, **config)\n elif config['classifier_algo'] in ['tile_aggregation', 'tile_aggregation_quick']:\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(';')\n\n assert len(classifier_weight_filepath) == 2\n classifier_algo_, model_tag_ = classifier_weight_filepath\n\n include_grid2 = config['classifier_algo'] in ['tile_aggregation']\n tid_list = ibs.scout_get_valid_tile_rowids(\n gid_list=gid_list, include_grid2=include_grid2\n )\n ancestor_gid_list = ibs.get_tile_ancestor_gids(tid_list)\n confidence_list = ibs.scout_wic_test(\n tid_list, 
classifier_algo=classifier_algo_, model_tag=model_tag_\n )\n\n gid_dict = {}\n for ancestor_gid, tid, confidence in zip(\n ancestor_gid_list, tid_list, confidence_list\n ):\n if ancestor_gid not in gid_dict:\n gid_dict[ancestor_gid] = []\n gid_dict[ancestor_gid].append(confidence)\n\n result_list = []\n for gid in tqdm.tqdm(gid_list):\n gid_confidence_list = gid_dict.get(gid, None)\n assert gid_confidence_list is not None\n best_score = np.max(gid_confidence_list)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['densenet+neighbors']:\n raise NotImplementedError\n # ut.embed()\n # classifier_weight_filepath = config['classifier_weight_filepath']\n\n # all_bbox_list = ibs.get_image_bboxes(gid_list)\n # wic_confidence_list = ibs.scout_wic_test(gid_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # ancestor_gid_list = list(set(ibs.get_tile_ancestor_gids(gid_list)))\n # all_tile_list = list(set(ibs.scout_get_valid_tile_rowids(gid_list=ancestor_gid_list)))\n # all_bbox_list = ibs.get_image_bboxes(all_tile_list)\n # all_confidence_list = ibs.scout_wic_test(all_tile_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # TODO: USE THRESHOLDED AVERAGE, NOT MAX\n # result_list = []\n # for gid, wic_confidence in zip(gid_list, wic_confidence_list):\n # best_score = wic_confidence\n # for aid in aid_list:\n # wic_confidence_ = aid_conf_dict.get(aid, None)\n # assert wic_confidence_ is not None\n # best_score = max(best_score, wic_confidence_)\n #\n # if wic_confidence < 0.5:\n # best_key = 'negative'\n # best_score = 1.0 - best_score\n # else:\n # best_key = 'positive'\n # if best_score > wic_confidence:\n # recovered += 1\n # result = (best_score, best_key, )\n # result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet']:\n import json\n\n json_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n assert exists(json_filepath)\n with open(json_filepath, 'r') as json_file:\n values = json.load(json_file)\n annotations = values.get('annotations', {})\n\n gpath_list = ibs.get_image_paths(gid_list)\n gname_list = [split(gpath)[1] for gpath in gpath_list]\n\n result_list = []\n for gname in gname_list:\n annotation = annotations.get(gname, None)\n assert annotation is not None\n\n best_score = 1.0\n if len(annotation) == 0:\n best_key = 'negative'\n else:\n best_key = 'positive'\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet_csv', 'scout_faster_rcnn_csv']:\n uuid_str_list = list(map(str, ibs.get_image_uuids(gid_list)))\n\n manifest_filepath = join(ibs.dbdir, 'WIC_manifest_output.csv')\n csv_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n\n assert exists(manifest_filepath)\n assert exists(csv_filepath)\n\n manifest_dict = {}\n with open(manifest_filepath, 'r') as manifest_file:\n manifest_file.readline() # Discard column header row\n manifest_line_list = manifest_file.readlines()\n for manifest_line in manifest_line_list:\n manifest = manifest_line.strip().split(',')\n assert len(manifest) == 2\n manifest_filename, manifest_uuid = manifest\n manifest_dict[manifest_filename] = manifest_uuid\n\n csv_dict = {}\n with open(csv_filepath, 'r') as csv_file:\n csv_file.readline() # Discard column header row\n csv_line_list = csv_file.readlines()\n 
for csv_line in csv_line_list:\n csv = csv_line.strip().split(',')\n assert len(csv) == 2\n csv_filename, csv_score = csv\n csv_uuid = manifest_dict.get(csv_filename, None)\n assert (\n csv_uuid is not None\n ), 'Test image {!r} is not in the manifest'.format(\n csv,\n )\n csv_dict[csv_uuid] = csv_score\n\n result_list = []\n for uuid_str in uuid_str_list:\n best_score = csv_dict.get(uuid_str, None)\n assert best_score is not None\n\n if config['classifier_algo'] in ['scout_detectnet_csv']:\n assert best_score in ['yes', 'no']\n best_key = 'positive' if best_score == 'yes' else 'negative'\n best_score = 1.0\n elif config['classifier_algo'] in ['scout_faster_rcnn_csv']:\n best_score = float(best_score)\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n else:\n raise ValueError\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in [\n 'lightnet',\n 'densenet+lightnet',\n 'densenet+lightnet!',\n ]:\n min_area = 10\n\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(',')\n\n if config['classifier_algo'] in ['lightnet']:\n assert len(classifier_weight_filepath) == 2\n weight_filepath, nms_thresh = classifier_weight_filepath\n wic_thresh = 0.0\n nms_thresh = float(nms_thresh)\n wic_confidence_list = [np.inf] * len(gid_list)\n wic_filter = False\n elif config['classifier_algo'] in ['densenet+lightnet', 'densenet+lightnet!']:\n assert len(classifier_weight_filepath) == 4\n (\n wic_model_tag,\n wic_thresh,\n weight_filepath,\n nms_thresh,\n ) = classifier_weight_filepath\n wic_thresh = float(wic_thresh)\n nms_thresh = float(nms_thresh)\n wic_confidence_list = ibs.scout_wic_test(\n gid_list, classifier_algo='densenet', model_tag=wic_model_tag\n )\n wic_filter = config['classifier_algo'] in ['densenet+lightnet']\n else:\n raise ValueError\n\n flag_list = [\n wic_confidence >= wic_thresh for wic_confidence in wic_confidence_list\n ]\n if wic_filter:\n gid_list_ = ut.compress(gid_list, flag_list)\n else:\n gid_list_ = gid_list[:]\n config = {\n 'grid': False,\n 'algo': 'lightnet',\n 'config_filepath': weight_filepath,\n 'weight_filepath': weight_filepath,\n 'nms': True,\n 'nms_thresh': nms_thresh,\n 'sensitivity': 0.0,\n }\n prediction_list = depc.get_property(\n 'localizations', gid_list_, None, config=config\n )\n prediction_dict = dict(zip(gid_list_, prediction_list))\n\n result_list = []\n for gid, wic_confidence, flag in zip(gid_list, wic_confidence_list, flag_list):\n if not flag:\n best_key = 'negative'\n best_score = 1.0 - wic_confidence\n else:\n prediction = prediction_dict.get(gid, None)\n assert prediction is not None\n\n best_score = 0.0\n if prediction is not None:\n score, bboxes, thetas, confs, classes = prediction\n for bbox, conf in zip(bboxes, confs):\n xtl, ytl, w, h = bbox\n area = w * h\n if area >= min_area:\n best_score = max(best_score, conf)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n else:\n raise ValueError(\n 'specified classifier algo is not supported in config = {!r}'.format(config)\n )\n\n # yield detections\n for result in result_list:\n yield result", "def predict(cls, image_path: str) -> tuple:\n\n print(\"Classify input image: \")\n return cls.model.predict(image_path)", "def get_classification(self, image):\n #TODO 
implement light color prediction\n predict = TrafficLight.UNKNOWN\n if self.predict is not None:\n # expand image dimensions\n image_expanded = np.expand_dims(image, axis=0)\n # run detection\n (scores, classes, num) = self.tf_session.run(\n [self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_expanded})\n\n # reduce the dimensions\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n # calculate prediction\n cc = classes[0]\n confidence = scores[0]\n \n if cc > 0 and cc <= 4 and confidence is not None and confidence > THRESHOLD:\n predict = self.clabels[cc]\n else:\n predict = TrafficLight.UNKNOWN\n\n if predict == TrafficLight.RED: \n Light_status = 'Red'\n elif predict == TrafficLight.GREEN:\n Light_status = 'Green'\n elif predict == TrafficLight.YELLOW:\n Light_status = 'Yellow'\n else:\n Light_status = 'Unknown'\n print('Light is ',Light_status)\n\n return predict", "def eval_net(net, loader, device, batch_size, threshold):\n net.eval()\n dice = 0\n acc_score = 0\n rec_score = 0\n f1_score = 0\n pres_score = 0\n jacc_score = 0\n\n for batch in loader:\n imgs = batch['image']\n true_masks = batch['mask']\n\n imgs = imgs.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n mask_pred = net(imgs)\n\n for true_mask, pred in zip(true_masks, mask_pred):\n pred = (pred > threshold).float()\n if net.n_classes > 1:\n dice += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0)).item()\n else:\n dice += dice_coeff(pred, true_mask.squeeze(dim=1)).item()\n pred = pred.detach().cpu().numpy()\n pred = pred.astype(int)\n pred = np.matrix.flatten(pred)\n\n true_mask = true_mask.cpu().numpy()\n true_mask = true_mask.astype(int)\n true_mask = np.matrix.flatten(true_mask)\n\n jacc_score += jaccard_score(true_mask, pred)\n acc_score += accuracy_score(true_mask, pred)\n pres_score += precision_score(true_mask, pred)\n rec_score += recall_score(true_mask, pred)\n\n dice = (dice / (len(loader) * batch_size))\n jacc_score = (jacc_score / (len(loader) * batch_size))\n acc_score = (acc_score / (len(loader) * batch_size))\n pres_score = (pres_score / (len(loader) * batch_size))\n rec_score = (rec_score / (len(loader) * batch_size))\n if (pres_score + rec_score) > 0:\n f1_score = 2 * (pres_score * rec_score) / (pres_score + rec_score)\n else:\n f1_score = 0\n\n print(\"Dice: \", dice)\n print(\"Jaccard_score: \", jacc_score)\n print(\"Accuracy: \", acc_score)\n print(\"Precision: \", pres_score)\n print(\"Recall: \", rec_score)\n print(\"F1_score: \", f1_score)\n return dice, jacc_score, acc_score, pres_score, rec_score, f1_score", "def get_classification(self, image):\n #TODO implement light color prediction\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n box = self.detection(image)\n if box == None:\n #rospy.loginfo('Classifier: No box found')\n return TrafficLight.UNKNOWN\n\n left, right, top, bottom = box\n img_crop = image[top:bottom, left:right]\n traffic_light = cv2.resize(img_crop, (32, 32))\n classification = self.classification(traffic_light)\n return classification", "def get_classification(self, image):\n # Image pre-processing pipeline\n img = cv2.resize(image, None, fx=0.5, fy=0.5)\n img = img.astype(np.float32)\n img = keras.applications.vgg16.preprocess_input(img)\n # Execute prediction\n probs = self.model.predict(np.array([img]), batch_size=1, verbose=1)[0]\n # get label with max 
probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n label = self.predictionary[g_x]\n rospy.loginfo(\"label: %d, conf: %f, %f, %f, %f\", g_x, probs[0], probs[1], probs[2], probs[3])\n return label", "def forward(sess, net, img, CONF_THRESH=0.8, NMS_THRESH=0.7):\n\n results = {'face': [],\n 'lp': []}\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, img)\n timer.toc()\n\n for cls in ('face', 'lp') :# enumerate(CLASSES[1:]):\n cls_ind = CLASSES.index(cls) # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n results[cls] = [\n [x0, y0, x1-x0, y1-y0] for (x0, y0, x1, y1, score) in dets[keep, :]]\n\n return results, timer.total_time", "def classify_image(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return [np.argmax(model.predict(np.array(images_list)))]", "def do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture,sigma_min,sigma_max, downsample_value):\n if np.ndim(img)==3:\n features = extract_features(\n img,\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n\n if mask is None:\n raise ValueError(\"If no classifier clf is passed, you must specify a mask.\")\n training_data = features[:, mask > 0].T\n\n training_data = memmap_feats(training_data)\n\n training_labels = mask[mask > 0].ravel()\n\n training_data = training_data[::downsample_value]\n training_labels = training_labels[::downsample_value]\n\n lim_samples = 100000 #200000\n\n if training_data.shape[0]>lim_samples:\n logging.info('Number of samples exceeds %i'% lim_samples)\n ind = np.round(np.linspace(0,training_data.shape[0]-1,lim_samples)).astype('int')\n training_data = training_data[ind,:]\n training_labels = training_labels[ind]\n logging.info('Samples have been subsampled')\n logging.info('Number of samples in training data: %i' % (training_data.shape[0]))\n print(training_data.shape)\n\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Initializing MLP model')\n\n clf.fit(training_data, training_labels)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('MLP model fit to data')\n\n del training_data, training_labels\n\n logging.info('Create and memory map model input data')\n\n data = features[:, mask == 0].T\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n data = memmap_feats(data)\n logging.info('Memory mapped model input data')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n labels = clf.predict(data)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Model used on data to 
estimate labels')\n\n if mask is None:\n result = labels.reshape(img.shape[:2])\n result2 = result.copy()\n else:\n result = np.copy(mask)#+1\n result[mask == 0] = labels\n del labels, mask\n result2 = result.copy()\n del result\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF feature extraction and model fitting complete')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def __init__(self, class_definitions, network, batch_size, batch_image_size,\n create_learn_epoch_fn, create_evaluate_epoch_fn, create_synthesis_fn,\n experiment, negative_labels=None, auxiliary_processors=None,\n config_proto=None, graph=None, device_string=None):\n super(LearnDiscriminatively, self).__init__(\n experiment.run_path, config_proto, graph, device_string)\n\n # the base network parameters: what to learn and on which architecture to learn it.\n self.network = network\n self.batch_size = batch_size\n self.batch_image_size = ensure_shape_3d(batch_image_size)\n self.experiment = experiment\n self.offline_file = self.experiment.file\n self.negative_labels = negative_labels\n self.auxiliary_processors = auxiliary_processors or []\n self.auxiliary_inputs = []\n\n self.class_definitions = class_definitions\n self.classes = self.create_training_classes(self.class_definitions, self.negative_labels)\n \n with tf.name_scope(LearnDiscriminatively.MAIN_SCOPE_NAME):\n self.global_step = tf.train.create_global_step(graph=self.graph)\n \n self.input = self.create_input()\n\n auxiliary_images = self.input.images\n auxiliary_labels = self.input.labels\n\n # we go through the auxiliary processors in the given order. 
this means\n # the innermost part of the composition is the first element, etc.\n for auxiliary_processor in self.auxiliary_processors:\n auxiliary_images, auxiliary_labels, auxiliary_names = auxiliary_processor(auxiliary_images, auxiliary_labels)\n\n self.auxiliary_inputs.append(\n AuxiliaryInput(images=auxiliary_images, labels=auxiliary_labels, names=auxiliary_names))\n \n self.discriminator = self.create_discriminator(self.network, auxiliary_images, auxiliary_labels)\n\n with tf.name_scope(\"input_loader\"):\n self.batch_loader = self.create_batch_loader(self.input)\n\n self.scaffold = tf.train.Scaffold( \n init_op=tf.global_variables_initializer(),\n local_init_op=tf.local_variables_initializer())\n self.saver = self.discriminator.saver()\n\n self.learn_epoch = create_learn_epoch_fn(self.discriminator, self.auxiliary_inputs)\n self.evaluate_epoch = create_evaluate_epoch_fn(self.discriminator, self.auxiliary_inputs)\n self.synthesize = create_synthesis_fn(self.discriminator, self.input)", "def classify_all_images(cc):\n print 'Classify images'\n images = cc.d.images\n for img_idx in range(comm_rank, len(images), comm_size): # PARALLEL\n print 'classify image %d/%d at %d'%(img_idx/comm_size, len(images)/comm_size, comm_rank)\n img = images[img_idx]\n scores = classify_image(cc, img_idx)\n savefile = config.get_classifier_score_name(img, cc.L)\n cPickle.dump(scores, open(savefile,'w'))", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def predict_class(self, original_image_numpy: np.ndarray) -> None:\n from app.dl_model.image import ClassifierInput\n # scale up coordinates\n self.scale_up_coordinates()\n x1, y1, x2, y2 = [int(coord) for coord in self.scale_coordinates.round()]\n # crop original numpy image\n numpy_image = original_image_numpy[y1:y2, x1:x2, :].copy()\n # create classifier input object\n classifier_input = ClassifierInput(numpy_image, new_shape=(224, 224))\n # classify input\n prediction = classifier_input.predict_class()\n # set attributes\n self.class_name = prediction.class_name # update class_name\n self.conf = prediction.conf # update probability\n self.product_id = prediction.product_id # set product external id\n self.detection_index = prediction.detection_index # set detection index\n self.top_k_names = prediction.top_k_names # set top k names list\n self.top_k_indices = prediction.top_k_indices # set top k detection index\n self.top_k_confidences = prediction.top_k_confidences # set top k confidieces values\n self.top_k_product_ids = prediction.top_k_product_ids # set top k product external ids", "def create_classification_model(include_top=True,\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n\n\n 
img_input = Input(shape=input_shape)\n # Block 1\n x = Conv2D(16, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(512, activation='relu', name='fc1')(x)\n x = Dense(128, activation='relu', name='fc2')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='vgg19')\n\n # # load weights\n # if weights == 'imagenet':\n # if include_top:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH,\n # cache_subdir='models')\n # else:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n # WEIGHTS_PATH_NO_TOP,\n # cache_subdir='models')\n # model.load_weights(weights_path)\n # if K.backend() == 'theano':\n # layer_utils.convert_all_kernels_in_model(model)\n #\n # if K.image_data_format() == 'channels_first':\n # if include_top:\n # maxpool = model.get_layer(name='block5_pool')\n # shape = maxpool.output_shape[1:]\n # dense = model.get_layer(name='fc1')\n # layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n #\n # if K.backend() == 'tensorflow':\n # warnings.warn('You are using the TensorFlow backend, yet you '\n # 'are using the Theano '\n # 'image data format convention '\n # '(`image_data_format=\"channels_first\"`). '\n # 'For best performance, set '\n # '`image_data_format=\"channels_last\"` in '\n # 'your Keras config '\n # 'at ~/.keras/keras.json.')\n return model", "def get_classification(self, image):\n #TODO implement light color prediction\n choices = {0: \"GREEN\", 1: \"YELLOW\", 2: \"RED\", 3: \"UNKNOWN\"}\n\n if self.capture_images:\n cv2.imwrite(self.imgPath+str(int(time.clock()*1000))+'.jpg', image)\n print('[TLClassifier] Saved Image ... ')\n\n if self.debug:\n print('[TL Classifier] invoked... 
')\n\n if image.shape != (300, 200, 3):\n print('[TL Classifier] image shape NOK: ' + str(image.shape))\n return \"UNKNOWN shape\"\n \n assert image.shape == (300, 200, 3)\n if self.debug:\n print('[TL Classifier] assertion ok: ')\n\n res = None\n res = cv2.resize(image, (32,32), interpolation = cv2.INTER_CUBIC)\n image = res.reshape(1, 32, 32, 3)\n classification = self.model.predict_classes(image, verbose=0)[0]\n result = choices.get(classification, 'UNKNOWN')\n\n if self.verbose:\n print('[TL Classifier] ' + result + ' detected.')\n\n return result", "def get_classification_site(self, image):\n #TODO implement light color prediction\n \n\timg=cv2.resize(image,(224,224))\n\timg=img/255.0\n\timg = np.expand_dims(img, axis=0)\n with self.graph.as_default():\n\t pred=self.model.predict(img)\n\tpclass=np.argmax(pred)\n\n \ttf_color=TrafficLight.UNKNOWN\n if (pclass==1):\n\t tf_color=TrafficLight.RED\n elif (pclass==2):\n\t tf_color=TrafficLight.GREEN\n\n return tf_color", "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "def process_images_class(images_class):\n\n images_class_to_int_map = {image: i for (i, image) in enumerate(sorted(set(images_class)))}\n print('Mapping:\\n{}'.format(images_class_to_int_map))\n images_class_int = [images_class_to_int_map[i] for i in images_class]\n\n return to_categorical(images_class_int, num_classes=len(set(images_class)))", "def classifier(base_layers, input_rois, num_rois, nb_classes=21):\n\n # compile times on theano tend to be very high, so we use smaller ROI pooling regions to workaround\n\n if K.backend() == 'tensorflow':\n pooling_regions = 7\n input_shape = (num_rois,7,7,512)\n elif K.backend() == 'theano':\n pooling_regions = 7\n input_shape = (num_rois,512,7,7)\n\n out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])\n\n out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)\n out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)\n out = TimeDistributed(Dropout(0.5))(out)\n out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)\n out = TimeDistributed(Dropout(0.5))(out)\n\n out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'), name='dense_class_{}'.format(nb_classes))(out)\n # note: no regression target for bg class\n out_regr = TimeDistributed(Dense(4 * (nb_classes-1), activation='linear', kernel_initializer='zero'), name='dense_regress_{}'.format(nb_classes))(out)\n\n return [out_class, out_regr]", "def model(images, is_training=True):\n images = tf.reshape(images, [-1, 28, 28, 1])\n\n # First convolutional layer with max pooling and ReLU activation.\n conv1 = slim.conv2d(images, 32, [5, 5], activation_fn=tf.nn.relu, scope='conv1')\n pool1 = slim.max_pool2d(conv1, [2, 2], scope='pool1')\n\n # Second convolutional layer with max pooling and ReLU activation.\n conv2 = slim.conv2d(pool1, 64, [5, 5], activation_fn=tf.nn.relu, scope='conv2')\n pool2 = slim.max_pool2d(conv2, [2, 2], scope='pool2')\n\n # First fully connected layer with ReLU activation.\n flat = slim.flatten(pool2)\n fc1 = slim.fully_connected(flat, 1024, activation_fn=tf.nn.relu, scope='fc1')\n\n # Dropout.\n drop = slim.dropout(fc1, 0.5, is_training=is_training)\n\n # Fully connected output layer (logits).\n fc2 = slim.fully_connected(drop, 10, activation_fn=None, scope='fc2')\n return fc2", "def predict_data(img): \n return 
gennet.predict_data(img, 'Resnet50')", "def run_inference_on_image(image):\n if not tf.gfile.Exists(image):\n tf.logging.fatal('File does not exist %s', image)\n image_data = tf.gfile.FastGFile(image, 'rb').read()\n\n # Creates graph from saved GraphDef.\n #create_graph()\n\n with tf.Session() as sess:\n # Some useful tensors:\n # 'softmax:0': A tensor containing the normalized prediction across\n # 1000 labels.\n # 'pool_3:0': A tensor containing the next-to-last layer containing 2048\n # float description of the image.\n # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG\n # encoding of the image.\n # Runs the softmax tensor by feeding the image_data as input to the graph.\n global RESULTS_ANALYSIS\n softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')\n\t\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n # Creates node ID --> English string lookup.\n node_lookup = NodeLookup()\n #top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]\n top_k = predictions.argsort()[-3:][::-1]\n RESULTS_ANALYSIS=''\n for node_id in top_k:\n human_string = node_lookup.id_to_string(node_id)\n score = predictions[node_id]\n print('%s (score = %.5f)' % (human_string, score))\n RESULTS_ANALYSIS=RESULTS_ANALYSIS+'%s (score = %.5f)' % (human_string, score)+';'", "def preprocess(example, num_classes=10, is_training=True):\n features = {'scores': tf.VarLenFeature(tf.float32),\n 'image': tf.FixedLenFeature((), tf.string)}\n parsed = tf.parse_single_example(example, features)\n image = tf.image.decode_jpeg(parsed['image'], channels=3)\n image = nima.preprocess_image(image, is_training=is_training)\n scores = parsed['scores']\n scores = tf.sparse_tensor_to_dense(scores)\n scores = tf.reshape(scores, [num_classes])\n scores = scores / tf.reduce_sum(scores, axis=-1, keepdims=True)\n return image, scores", "def define_model(input_shape=(32,32,3), depth=110, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "def predict(self, image) -> tuple:\n if type(image) == bytes: # allow to pass binary image file content\n img = Image.open(BytesIO(image))\n img = img.convert(\"L\").resize(self.input_size) # convert(\"L\") -> grayscale\n else: # otherwise expect 
filepath\n img = load_img(image, color_mode='grayscale', target_size=self.input_size)\n data = img_to_array(img)/255 # normalize pixel intensity -> [0,1]\n data = data.reshape((1,) + data.shape)\n with self.graph.as_default():\n with self.session.as_default():\n prediction = self.model.predict(data)\n # generate and return the (class, confidence) tuple\n if self.is_binary:\n if prediction[0][0] <= 0.5:\n return (self.classes[0], float(1.0 - prediction[0][0]))\n return (self.classes[1], float(prediction[0][0]))\n return (self.classes[np.argmax(prediction[0])], float(np.max(prediction[0])))", "def classify_type(self):\n a = model_classifier(self.model_filepath)\n\n size = os.stat(self.model_filepath).st_size\n size = size / 1000000 # in MB\n print('Model size in MB: {}'.format(size))\n self.model_size = size\n\n if abs(size - a.resnet50_size) < abs(size - a.inceptionv3_size):\n if abs(size - a.resnet50_size) < abs(size - a.densenet121_size):\n print('RESNET 50 Model is analyzed')\n model_type = 1\n min_model_size_delta = a.resnet50_size - size\n else:\n print('DENSENET 121 Model is analyzed')\n model_type = 3\n min_model_size_delta = a.densenet121_size - size\n elif abs(size - a.inceptionv3_size) < abs(size - a.densenet121_size):\n print('INCEPTIONV3 Model is analyzed')\n model_type = 2\n min_model_size_delta = a.inceptionv3_size - size\n else:\n print('DENSENET Model is analyzed')\n model_type = 3\n min_model_size_delta = a.densenet121_size - size\n\n #print('classified the model as:\\t', a.switch_architecture(model_type))\n return model_type, min_model_size_delta", "def inference(config_file, image_file):\n # Get config\n FLAGS = Flags(config_file).get()\n out_charset = load_charset(FLAGS.charset)\n num_classes = len(out_charset)\n net = get_network(FLAGS, out_charset)\n\n if FLAGS.use_rgb:\n num_channel = 3\n mode = cv2.IMREAD_COLOR\n else:\n num_channel = 1\n mode = cv2.IMREAD_GRAYSCALE\n\n # Input node\n image = tf.placeholder(tf.uint8,\n shape=[None, None, num_channel],\n name='input_node')\n\n # Network\n proc_image = net.preprocess_image(image, is_train=False)\n proc_image = tf.expand_dims(proc_image, axis=0)\n proc_image.set_shape(\n [None, FLAGS.resize_hw.height, FLAGS.resize_hw.width, num_channel])\n logits, sequence_length = net.get_logits(proc_image,\n is_train=False,\n label=None)\n prediction, log_prob = net.get_prediction(logits, sequence_length)\n prediction = tf.sparse_to_dense(sparse_indices=prediction.indices,\n sparse_values=prediction.values,\n output_shape=prediction.dense_shape,\n default_value=num_classes,\n name='output_node')\n\n # Restore\n restore_model = get_init_trained()\n sess = tf.Session()\n restore_model(sess, FLAGS.eval.model_path)\n\n # Run\n img = cv2.imread(image_file, mode)\n img = np.reshape(img, [img.shape[0], img.shape[1], num_channel])\n predicted = sess.run(prediction, feed_dict={image: img})\n string = get_string(predicted[0], out_charset)\n string = adjust_string(string, FLAGS.eval.lowercase,\n FLAGS.eval.alphanumeric)\n print(string)\n\n return string", "def runClassifier(interpreter, image, threshold):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n \"bounding_box\": boxes[i],\n \"class_id\": classes[i],\n \"score\": scores[i],\n }\n 
results.append(result)\n return results", "def _classify(self, loader):\n ypred = []\n ytrue = []\n\n for _, inputs, targets in loader:\n inputs = inputs.to(self._device)\n logits = F.softmax(self._network(inputs), dim=1)\n preds = logits.argmax(dim=1).cpu().numpy()\n\n ypred.extend(preds)\n ytrue.extend(targets)\n\n return np.array(ypred), np.array(ytrue)", "def model(inputs, target_images, is_training):\n # if isinstance(inputs, tuple):\n assert mask_augs >= 0. and mask_augs <= 1., \"mask_augs must be in [0, 1]\"\n if FLAGS.use_td_loss and isinstance(inputs, tuple):\n # print('#'*80)\n # print(inputs)\n assert metric is not None, \"Metric function is None\"\n inputs, augs = inputs\n B = inputs.get_shape().as_list()[0]\n A = augs.get_shape().as_list()[1]\n if mask_augs > 0:\n mask = tf.cast(tf.greater(tf.random.uniform(shape=[B, A], minval=0., maxval=1.), 0.5), augs.dtype) # noqa\n bias = mask * -1\n augs = (augs * mask) + bias # Randomly mask out augs for difficulty and code those dims as -1\n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training=is_training)\n print(\"Features: \")\n print(features)\n print(\"---\")\n # Global average pool of B 7 7 2048 -> B 2048\n if data_format == 'channels_last':\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n print(\"Outputs: \")\n print(outputs)\n print(\"---\")\n # B 2048\n\n h_w = features.get_shape().as_list()[1]\n # print(h_w)\n\n augs = tf.tile(augs[:,None,None,:], tf.constant([1,h_w,h_w,1]))\n print(\"Augs: \")\n print(augs)\n print(\"---\")\n features = tf.concat([features, augs], axis=-1)\n \n with tf.variable_scope('decoder'):\n recon_images = decoder(\n features,\n block_activities,\n is_training=is_training,\n skip=skip)\n print(\"Reconstructed images and target images: \")\n print(recon_images)\n print(target_images)\n print(\"---\")\n with tf.variable_scope('metric'):\n # Squash both recon and target images\n recon_images_squash = tf.tanh(recon_images)\n target_images = (target_images * 2) - 1\n Bt = target_images.get_shape().as_list()[0]\n Br = recon_images_squash.get_shape().as_list()[0]\n if Bt == Br:\n # Attractive + repulsive loss\n pass\n elif Bt * 2 == Br:\n # Attractive-only loss\n target_images = tf.concat([target_images, target_images], 0)\n\n # Differentiable perceptual metric. 
First reconstruction.\n # both_images = tf.concat([recon_images, target_images], -1) # B H W 6\n all_images = tf.concat([recon_images_squash, target_images], 0) # Stack these in batch dim\n metric_all_images = metric(all_images, is_training=is_training)\n # B = metric_all_images.get_shape().as_list()[0]\n metric_all_images = tf.reshape(metric_all_images, [B, -1])\n metric_hidden_r, metric_hidden_t = tf.split(metric_all_images, 2, 0) # Split these in batch dim\n\n # Prep recon_images for visualization\n # recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n # recon_images = (recon_images + 5) / 10\n\n recon_mean, recon_std = tf.nn.moments(recon_images, axes=[1, 2], keep_dims=True)\n recon_images = (recon_images - recon_mean) / recon_std\n recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n recon_images = (recon_images + 5) / 10\n # recon_images = recon_images_squash\n if greyscale_viz:\n recon_images = tf.image.rgb_to_grayscale(recon_images)\n recon_images = tf.concat([recon_images, recon_images, recon_images], -1)\n print(\"Embedding output: \")\n print(metric_hidden_t)\n print(\"---\")\n return outputs, recon_images, metric_hidden_r, metric_hidden_t\n\n else:\n # augs = None\n \n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training)\n \n if data_format == 'channels_last':\n print(\"Features:\")\n print(features)\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n \n # filter_trainable_variables(trainable_variables, after_block=5)\n # add_to_collection(trainable_variables, 'trainable_variables_inblock_')\n\n return outputs" ]
[ "0.6846357", "0.658142", "0.6509911", "0.64659", "0.6432261", "0.6414406", "0.6399443", "0.6369863", "0.63570625", "0.6340332", "0.6332008", "0.6302619", "0.6292333", "0.6278609", "0.62669504", "0.6261403", "0.62539554", "0.62080777", "0.6207685", "0.61810887", "0.6176677", "0.61729866", "0.6161067", "0.6159674", "0.6149175", "0.6148176", "0.61398274", "0.6115614", "0.6114504", "0.61054516", "0.61001825", "0.6097833", "0.60924023", "0.60799426", "0.6075056", "0.6048512", "0.6018776", "0.59945405", "0.5992207", "0.5975079", "0.5970402", "0.59700125", "0.5969467", "0.5962449", "0.595557", "0.5954989", "0.59306103", "0.592945", "0.59278154", "0.58935696", "0.5891517", "0.5890882", "0.58875835", "0.5884077", "0.58706355", "0.5864019", "0.5859154", "0.5852107", "0.5842771", "0.58255506", "0.57943", "0.57894236", "0.57579017", "0.5745641", "0.5744975", "0.57281077", "0.57217723", "0.5720264", "0.5715117", "0.56976026", "0.5692068", "0.56909657", "0.56899744", "0.5687266", "0.5685645", "0.5684918", "0.5680278", "0.5660567", "0.5660324", "0.5654335", "0.5653968", "0.56533873", "0.56509405", "0.5650431", "0.56455237", "0.56419736", "0.56411904", "0.56361234", "0.56337327", "0.5631395", "0.56281227", "0.5618933", "0.5612557", "0.5608707", "0.5606462", "0.5598009", "0.5596177", "0.55934644", "0.5576747", "0.557295", "0.55653864" ]
0.0
-1
Return the count of the highest order model used.
def order(self): return self.n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_order_number_used():\n return Order.__last_order_number_used", "def _get_top_models_to_improve(self):\n self._validate_top_models_to_improve()\n if self.top_models_to_improve == \"auto\":\n if self._get_mode() == \"Explain\":\n return 0\n if self._get_mode() == \"Perform\":\n return 2\n if self._get_mode() == \"Compete\":\n return 3\n if self._get_mode() == \"Optuna\":\n return 0\n else:\n return deepcopy(self.top_models_to_improve)", "def get_number_of_models():\n return 8", "def get_model_count(self):\n return len(self._model_start_i)", "def get_model_count_of_largest_tenant(database_file_path):\n # remove the line below in case you have implemented the query.\n raise NotImplementedError\n\n query = \"\"\"\n \"\"\"\n\n return _fetch_result_from_database(query, database_file_path)", "def count_models(self):\n return len(self.model_list)", "def free_tier_model_count(self) -> int:\n if self._free_tier_model_count is None:\n free_tier_model_count_key = \"FREE_TIER_MODEL_COUNT\"\n free_tier_model_count_str = self._get_env(free_tier_model_count_key)\n try:\n self._free_tier_model_count = int(free_tier_model_count_str)\n except ValueError as exc:\n raise AssertionError(\n f\"the {free_tier_model_count_key} environment variable value must \"\n f\"be an integer, {free_tier_model_count_str=}\"\n ) from exc\n\n return self._free_tier_model_count", "def get_number_of_instances(model):\n if model is None:\n return 0\n else:\n return float(len(model[0].split('d'))-2)", "def get_max_product_order_amount(self):\n out = 0\n for p in self.products:\n if p.max_order_amount() > out:\n out = p.max_order_amount()\n return out", "def count(self):\n return len(self.order_lst)", "def get_most_used_os(records):\n systems = {}\n for r in records:\n systems[r.os] = systems.get(r.os, 0) + 1\n max_req = 0\n max_system = None\n for k, v in systems.items():\n if v > max_req:\n max_req, max_source = v, k\n return max_system", "def get_most_ordered_products(self):\n return self.products.groupby(\"products\").agg(\n {\"id\": lambda x: len(np.unique(x))}).reset_index().rename(\n columns={\"id\": \"order_count\"}).sort_values(by='order_count', ascending=False)", "def max(self) -> int:\n return self._status['party_size'][1]", "def count(self):\n return len(self.order_items)", "def best_coupling(self):\n\n return self.coupling().max()", "def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='total_score', desc=False)", "def mode(lyst):\n # Create a set of one occurance of the numbers\n nums = set(lyst)\n return_value = 0\n top_count = 0\n \n # Iterate over nums and count the occurance of each number in lyst\n for num in nums:\n if lyst.count(num) > top_count:\n return_value = num \n \n return return_value", "def mode(self):\n mode = max(self.data, key=self.data.count)\n return mode", "def get_most_popular(series):\n most_popular = series.mode()[0]\n count = sum(series == most_popular)\n return most_popular, count", "def _get_max_group_index(self):\n cursor = self.mongo.db.userfield.find({}).sort('index', -1)\n model = []\n for group in cursor:\n model = group\n break\n if not model:\n return 0\n else:\n return model['index']", "def getMostUsedCount( self, limit ):\n cur = self.__conn.cursor()\n cur.execute( \"\"\"SELECT Data, COUNT(Data) AS UseCount\n FROM PrivilegeUse\n GROUP BY Data\n ORDER BY UseCount DESC\n LIMIT %d\"\"\", limit )\n class Use:\n def __init__( self, faq, count ):\n self.faq = faq\n self.count = count\n \n return [ Use(row[0], row[1]) for row in 
cur.fetchall() ]", "def get_latest_model():\n return get_models()[-1]", "def order(self):\n return len(self.coeff)-1", "def max_known_number(self):\n return len(self.number_list)-1", "def highest_rank(self):\n return max(self.cards).rank", "def maximum_item_count(self):\n return self._maximum_item_count", "def GetLastOrderQuantity(self):\r\n return self.lastOrderQuantity", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def max_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_count\")", "def norders(self):\n return 21", "def num_models():\n N = input(\"How many models would you like to test?\")\n N = int(N)\n return N", "def orders_count(self):\n return Order.objects.filter(email=self.email).count()", "def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)", "def total_models_produced(self):\n return self._total_models_produced", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def max(self):\n return max(self.committed_together, default=0)", "def top_dimensionality(self):\n return self._vocab_size", "def get_num_objects(cls):\n return cls.mum_objects", "def __len__(self):\n try:\n return len(self.default_model)\n except TypeError:\n return 0", "def __findBestLogProbability(self):\n best_model = None\n highest_log_probability = -sys.maxsize# (np.finfo(float).eps)\n\n # Find the highest model\n for item in self.data_array:\n if item[1] > highest_log_probability:\n best_model = item\n highest_log_probability = item[1]\n\n return best_model", "def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count", "def maximum_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"maximum_count\")", "def top_coded(self):\n qs = self.model_qs().exclude(**{self.foreign_key_id_field_name: None})\n\n fk_id = qs.values(self.foreign_key_id_field_name)\n fk_id = fk_id.annotate(\n counted_fk_field=Count(self.foreign_key_id_field_name)\n )\n fk_id = fk_id.order_by(\"-counted_fk_field\")[:self.TOP_AMOUNT]\n\n return fk_id.values_list(\n \"{}_fk__name\".format(self.field_name), \"counted_fk_field\"\n )", "def control_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count_max or 0)", "def get_number_of_relations(model):\n if model == None:\n return 0\n counter = 0\n for line in model:\n if line.find('f(2') >= 0:\n counter += 1\n return float(counter)\n #TODO when multiples of same relation, the result is still 1", "def get_mode(x):\n mode, count = Counter(x).most_common(1)[0]\n return mode", "def object_count(request, model):\n active_tool_session_id = request.session[\"active_tool_session_id\"]\n num_of_objects = model.objects.filter(\n tool_session_id=active_tool_session_id\n ).count()\n return num_of_objects", "def edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.edge_count_max or 0)", "def max_key(self):\n return self._price_list[-1]", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def get_count(self):\r\n return self.count", "def get_all_orders_count(): \n data = 
order_obj.get_all_orders(\"1\")\n return data", "def get_most_ordered_categories(self):\n return self.products.groupby(\"category\").agg({\"id\": lambda x: len(np.unique(x))}).reset_index().rename(\n columns={\"id\": \"order_count\"}).sort_values(by='order_count', ascending=False)", "def num_actions(self) -> int:\n return self.max_order_quantity + 1", "def get_n_best(self):\n pass", "def get_most_valuable(self):\n return self.most_valuable", "def get_vehicle_count(self):\n return len(self.vehicles)", "def get_max_quantity(self):\n try:\n additional_json = self.get_additional_json()\n return additional_json['ActionPanel']['isModel']['remainQty']\n except Exception as error:\n return None", "def max_delivery_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"max_delivery_count\")", "def get_count(self):\n return self.count", "def get_count(self):\n return self.count", "def get_number_of_agents(model):\n\n n_agents = len(model.schedule.agents_by_type['Customer'])\n return n_agents", "def max_tokens_for_prompt(self, prompt: str) -> int:\n num_tokens = self.get_num_tokens(prompt)\n\n # get max context size for model by name\n max_size = self.modelname_to_contextsize(self.model_name)\n return max_size - num_tokens", "def best_coupling_frequency(self):\n\n idx_best = self.coupling().argmax()\n\n return self.freq.f[idx_best]", "def getlastserialnumber(self, partmodel, batchnumber, pos):\r\n if hasattr(self.session.db, 'testsaver'):\r\n return self.session.db.testsaver.getmaxpartsn(partmodel, batchnumber, pos)", "def __get_model_length(self):\n self.__lock_acquire()\n length = len(self.__view.model)\n self.__lock_release()\n return length", "def count(self) -> Optional[float]:\n return pulumi.get(self, \"count\")", "def count(self):\n return self.get_count()", "def max_findings(self) -> float:\n return pulumi.get(self, \"max_findings\")", "def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value", "def top(self) -> int:\n return self.q[0]", "def top(self) -> int:\n return self.q[0]", "def top(self) -> int:\n return self.q[0]", "def count_chains(self):\n if self.default_model:\n return self.default_model.count_chains()\n return 0", "def most_stable():\n \n \n \n \n return Z", "def get_max_product_range(self):\n out = 0\n for p in self.products:\n if p.get_max_order_range() > out:\n out = p.get_max_order_range()\n return out", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def getNumSubmodels(self):\n return _libsbml.CompModelPlugin_getNumSubmodels(self)", "def get_entity_count(cls):\n return int(cls.db.get(\"entity_count\"))", "def best_promo(order: Order) -> Decimal:\n return max(promo(order) for promo in promos) # <3>", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def get_count(self):\n\n\t\treturn self.__count", "def last_count(self):\n return self.__last_count", "def getMaxKey(self):\n print self.freq\n if self.freq:\n max_freq = max(self.freq.keys())\n return list(self.freq[max_freq])[0]\n\n return ''", "def MostUsedBuses(self):\n busKM = lambda bus: bus.getTimesUsedRoute() * 
self.__routeRepo.getObj(bus.getRouteCode()).getLength()\n buses = self.__busRepo.getAll()\n sortedBuses = sorted(buses,key = busKM,reverse=True)\n return sortedBuses", "def mode(self):\r\n\t\t_set\t= set(self.sample)\r\n\t\t_list\t= [self.sample.count(i) for i in _set]\r\n\t\treturn list(_set)[_list.index(max(_list))]", "def calculate_greatest(self):\n greatest = 0\n for resourceList in self.loading.values():\n for time, use in resourceList:\n if use > greatest:\n greatest = use\n self.emit(\"greatest_calculated\",greatest)\n return greatest", "def max_count_rule(self) -> Optional[pulumi.Input['ApplicationMaxCountRuleArgs']]:\n return pulumi.get(self, \"max_count_rule\")", "def max_count(self):\n return self.config.get('max_count', 500)", "def maximum_elastic_worker_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_elastic_worker_count\")", "def most_read_book(self):\n reading_max = 0\n most_reads = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > reading_max:\n most_reads = book\n reading_max = rating\n else:\n continue\n return most_reads", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max(self):\n return self.get_first()", "def getMaxMancount(self):\n return self.__size * 20", "def get_top_tags_counter(tags):\n return collections.Counter(tags).most_common(TOP_NUMBER)" ]
[ "0.6919758", "0.64155287", "0.63922703", "0.6360874", "0.62786955", "0.622468", "0.61927", "0.6129268", "0.6083907", "0.60793793", "0.6077859", "0.5981426", "0.5938052", "0.57794553", "0.5749997", "0.57499874", "0.57459366", "0.573959", "0.5728474", "0.57043415", "0.57005787", "0.5676792", "0.56732434", "0.56629753", "0.56595445", "0.5650793", "0.5650013", "0.56492877", "0.56492877", "0.56492877", "0.56492877", "0.56492877", "0.5648872", "0.5644538", "0.56360775", "0.5622482", "0.55925006", "0.55905986", "0.55786496", "0.5578455", "0.55623496", "0.55531734", "0.5541601", "0.5536267", "0.55329686", "0.55225194", "0.5521855", "0.5510588", "0.54978937", "0.54882145", "0.5485819", "0.5477115", "0.5475217", "0.5475217", "0.5468719", "0.54660296", "0.5462575", "0.54564667", "0.545641", "0.54456836", "0.544461", "0.5438489", "0.5429035", "0.542756", "0.542756", "0.54046094", "0.5392376", "0.5390079", "0.53839636", "0.5371581", "0.53617555", "0.53523904", "0.5344807", "0.53433233", "0.5339301", "0.5339301", "0.5339301", "0.53390086", "0.53366315", "0.5331009", "0.5327273", "0.5322928", "0.5312648", "0.5305394", "0.5305018", "0.5305018", "0.52862334", "0.5285127", "0.5274165", "0.5273806", "0.5273658", "0.5271256", "0.5269317", "0.52600396", "0.52558076", "0.5255804", "0.525575", "0.5246688", "0.52411216", "0.52391243", "0.52281004" ]
0.0
-1
Get the probability of a word following a context, i.e., the conditional probability P(word|context).
def prob(self, word, context=None): if not context: context = () else: context = tuple(context) prob = 0 for i in range(len(context) + 1): prob += self.weights[i] * self.ngram_cpd[context[i:]][word] return prob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prob(self, word, context):\n\n context = tuple(context)\n \n context_lenth = len(context) \n if context_lenth == 0:\n line = ''\n elif context_lenth == 1:\n line = context[0]\n elif context_lenth >= 2:\n line = context[0]\n for each_word in context[1:]:\n line = line + ' ' + each_word\n line = line + ' ' + word\n \n try:\n #print self.slct % (line)\n self.cursor.execute(self.slct % (line))\n data = self.cursor.fetchall()\n except Exception, e:\n print \"Error happened when access gramc DB: \", e\n return 1\n \n if len(data):\n cnt = data[0][0]\n #result = 0.0\n result = cnt / self.cnt_sum[context_lenth+1]\n #print result\n if result == 0:\n result = 1\n return result\n elif context_lenth == 0:\n return 1\n else:\n return self.prob(word, context[1:])", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p", "def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0", "def logprob(self, word, context):\n\n return -log(self.prob(word, context), 2)", "def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product", "def cond_prob(self, token, prev_tokens=None):\n\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n # ngram condicional probs are based on relative counts\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n\n return hits / float(sub_count)", "def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result", "def context_probabilities(self, context):\n if context not in self._cache.keys():\n self._cache[context] = {\n word: self.score(word, context) for word in self.vocab.counts.keys()\n }\n return self._cache[context]", "def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob", "def 
compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())", "def calculate_word_probabilities(word):\n\n\tprobabilities = {\"one\":0,\"two\":0,\"three\":0,\"four\":0,\"five\":0}\n\n\tfor star in range(1,6):\n\t\tconditional = float(word[number_to_text[star]])/statements_with_star[star]\n\t\tprobabilities[number_to_text[star]]=conditional*10\n\n\tdb.words.update({\"_id\":ObjectId(word[\"_id\"])},{\"$set\":{\"conditionals\":probabilities}})\n\n\treturn 1", "def probability(self, words):\n if len(words) == 0:\n return 0\n \n prob = 1\n model = self.mdl\n \n words_ngram = NGramLM(self.N, []).create_ngrams(words) # Create NGram model for words\n for ngram in words_ngram:\n # Never seen before ngram or n-1gram\n if (ngram not in list(model['ngram'])) or (ngram[:-1] not in list(model['n1gram'])):\n return 0\n if isinstance(self, NGramLM):\n prob *= model[model['ngram'] == ngram]['prob'].values[0]\n \n def recur_prob(model, w):\n prob = 1\n prev_mod = model.prev_mdl\n if isinstance(prev_mod, UnigramLM): # Unigram base case\n prob *= prev_mod.mdl[w[0]]\n else:\n words_n1gram = NGramLM(prev_mod.N, []).create_ngrams(w) # Create NGram model for words\n prob *= prev_mod.mdl[prev_mod.mdl['ngram'] == words_n1gram[0]]['prob'].values[0]\n prob *= recur_prob(prev_mod, words_n1gram[0]) # Recursive call\n return prob\n\n prob *= recur_prob(self, words_ngram[0])\n \n return prob", "def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n word_prob = {}\n # TODO: add your code here\n total_word = 0\n\n word_prob[None] = 0\n\n\n for dic in training_data:\n\n for index0, i0 in enumerate(dic['bow']):\n if (list(dic['bow'])[index0] in word_prob):\n continue;\n word_prob[list(dic['bow'])[index0]] = 0\n #word_prob[None] = 0\n if(dic[\"label\"] == label):\n for index, i in enumerate(dic[\"bow\"]):\n if(list(dic['bow'])[index] in vocab):\n if(list(dic['bow'])[index] in word_prob):\n\n word_prob[list(dic['bow'])[index]] += dic[\"bow\"][i]\n else:\n word_prob[list(dic['bow'])[index]] = dic[\"bow\"][i]\n else:\n if(None in word_prob):\n word_prob[None] += dic[\"bow\"][i]\n else:\n word_prob[None] = 0\n\n total_word += dic[\"bow\"][i]\n #word_prob [None] = 5\n\n for h in word_prob:\n word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))\n\n\n return word_prob", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)", "def next_word_probability(self, observation, partial_out):\n if not hasattr(self, 'prev_enc'):\n self.prev_enc = None\n self.last_text = None\n if observation['text'] != self.last_text:\n self.prev_enc = None\n self.last_text = observation.get('text')\n self.observe(observation)\n\n obs = self.observation\n obs['eval_labels'] = [' '.join(partial_out)]\n batch = self.vectorize([obs])\n self.model.eval()\n self.model.longest_label = 
1 # no need to predict farther ahead\n out = self.model(\n batch[0], # xs\n ys=(batch[1] if len(partial_out) > 0 else None),\n prev_enc=self.prev_enc)\n scores, self.prev_enc = out[1], out[3]\n # scores is bsz x seqlen x num_words, so select probs of current index\n assert len(partial_out) == scores.size(1) - 1\n probs = F.softmax(scores.select(1, len(partial_out)), dim=1).squeeze().cpu()\n dist = self.probs\n for i in range(len(probs)):\n try:\n val = probs[i].item()\n except AttributeError:\n val = probs[i][0]\n dist[self.dict[i]] = val\n self.batch = batch\n return dist", "def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p", "def cond_prob(self, token, prev_tokens=None):\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n # heuristic\n return (hits+1) / (float(sub_count)+self.V())", "def calc_p(self, context, seq):\n num_zeros, num_ones = _count_followers(context, seq)\n if num_zeros == 0 and num_ones == 0:\n return 1.0\n\n p0context = self.calc_p(\"0\" + context, seq)\n p1context = self.calc_p(\"1\" + context, seq)\n p_uncovered = 1.0\n if seq.startswith(context):\n # A bit will be uncovered by the child models,\n # if the 0context and 1context don't fit before it in the sequence.\n # The \"Extending the Context-Tree Weighting Method\" paper names\n # the p_uncovered as P^{epsilon s}.\n assert self.estimator(1, 0) == self.estimator(0, 1)\n p_uncovered = 0.5\n\n # The CTW estimate is the average\n # of the this context model and the model of its children.\n # The recursive averaging prefers simpler models.\n result = 0.5 * (\n self.estimator(num_zeros, num_ones) +\n p0context * p1context * p_uncovered)\n return result", "def get_lexical_generation_prob(self, word, label):\n word = word.lower()\n numer = self.SMOOTHING_VALUE\n if word in self.words_labels_counts[label] and self.words_labels_counts[label][word] != 0:\n numer += self.words_labels_counts[label][word]\n elif word in self.words_labels_counts[label]:\n numer += self.words_labels_counts[label][self.UNKNOWN_TOKEN]\n denom = self.label_counts[label] + self.SMOOTHING_VALUE * self.all_grams.get_count()\n return float(numer) / denom", "def cond_prob(self, token, prev_tokens=()):\n assert len(prev_tokens) < self._n\n if self.count(prev_tokens) == 0:\n return 0.0\n return float(self.count(list(prev_tokens) + [token])) / float(self.count(prev_tokens))", "def prob(self, w):\n return self.counts[w] / self.total_count", "def cond_prob(self, token, prev_tokens=()):\n return float(self.count(list(prev_tokens) + [token]) + 1) / float(self.count(prev_tokens) + self._V)", "def next_word_proba(self, word, seq):\n context = tuple(seq[-2:]) # last two words\n return self.probas[context].get(word, 0.0)", "def estimate_prob(self, history, word):\n\t\t# YOUR CODE HERE\n\n\t\tif history == '':\n\t\t\t# unigram\n\t\t\tword_frequency = self.ngram_counts[tuple([word])]\n\t\t\treturn word_frequency/self.total_counts\n\n\t\telse:\n\t\t\t# bigram\n\t\t\tword_frequency = self.ngram_counts[tuple([history, word])]\n\t\t\t# history_count = sum([self.ngram_counts[key] for key in self.ngram_counts if key[0] == history])\n\t\t\t# history_count = self.history_count[history]\n\t\t\thistory_count = self.ngram_counts[tuple([history])]\n\t\t\t# print('his: {}',format(history))\n\t\t\t# print('his count 
{}'.format(history_count))\n\t\t\treturn word_frequency/history_count", "def eval_ppl(model, context, resp_gt, vocab):\n loss = 0\n num_tokens = 0\n num_unk = 0\n for i in range(len(resp_gt)):\n if resp_gt[i] in vocab:\n probs, eos_probs = model.next_word_probability(context, resp_gt[:i])\n prob_true = probs.get(resp_gt[i], 0)\n if prob_true > 0:\n prob_true /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)\n loss -= math.log(prob_true)\n else:\n loss = float('inf')\n num_tokens += 1\n else:\n num_unk += 1\n probs, eos_probs = model.next_word_probability(context, resp_gt)\n eos_probs /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)\n loss -= math.log(eos_probs)\n num_tokens += 1\n return loss / num_tokens, math.exp(loss / num_tokens)", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def get_probability(self, sentence):\n if len(sentence) == 1:\n return Decimal(10) ** self.get_unigram_log_prob(sentence)\n elif len(sentence) == 2:\n return Decimal(10) ** self.get_bigram_log_prob(sentence)\n else:\n log_prob = Decimal(0.0)\n for w1, w2, w3 in zip(sentence, sentence[1:], sentence[2:]):\n log_prob += self.get_trigram_log_prob((w1, w2, w3))\n log_prob = Decimal(log_prob)\n return Decimal(10) ** log_prob", "def perplexity(self, corpus):\n l = 0\n total_word_count = 0\n for sentence in corpus :\n l += self.sentence_logprob(sentence)\n # 2 extra START tokens and 1 extra STOP token\n total_word_count += len(sentence)\n l /= total_word_count\n return math.pow(2, -l)", "def get_prob(self, term):\n # first need to find a document that contains the term\n hits = self._es.search(index=INDEX_NAME, q=term, df=self._field, _source=False, size=1).get(\"hits\", {}).get(\n \"hits\", {})\n doc_id = hits[0][\"_id\"] if len(hits) > 0 else None\n if doc_id is not None:\n # ask for global term statistics when requesting the term vector of that doc\n tv = self._es.termvectors(index=INDEX_NAME, doc_type=DOC_TYPE, id=doc_id, fields=self._field,\n term_statistics=True)[\"term_vectors\"][self._field]\n ttf = tv[\"terms\"].get(term, {}).get(\"ttf\", 0) # total term count in the collection (in that field)\n sum_ttf = tv[\"field_statistics\"][\"sum_ttf\"]\n return ttf / sum_ttf\n\n return None", "def pred(self, w):\n pr = 0;\n res = ''\n for item in self.counts:\n if w in item[:-1] and self.prob(item) > pr:\n# print(\"HIT\")\n# print(item)\n i = item.index(w) + len(w)\n res = item[i]\n pr = self.prob(item)\n if res == '':\n res = '*'\n return res", "def posterior_first(self, word):\r\n prob = {}\r\n if word not in prob.keys():\r\n prob[word] = {\r\n pos: self.emission_probability[pos][word]\r\n * self.initial_probability[pos]\r\n if word in self.emission_probability[pos]\r\n else (1 / float(10 ** 10)) * self.initial_probability[pos]\r\n for pos in self.position_list\r\n }\r\n\r\n return prob[word]", "def probability(cpts, term, obs):\r\n \r\n \r\n # term is a list e.g., ['x_1', '0']\r\n # flip refers to the assignment either '0' false or '1' true\r\n flip = term[1]\r\n # the term itself\r\n term = term[0]\r\n # accumulator variable\r\n answer = 0\r\n # this loop locates where in the CPT we're looking\r\n for clause in range(len(cpts)):\r\n if cpts[clause][0] == term:\r\n index = clause\r\n # focus on our term\r\n cpt = cpts[index]\r\n # this loop checks if there are no preconditions\r\n # if not, then we 
immediately know the probability and can return\r\n for m in range(len(cpt[1])):\r\n if cpt[1][m][-2][1] == '1':\r\n if cpt[1][m][0] == [[]]:\r\n answer = cpt[1][m][-1]\r\n # list of the variables we have observed\r\n have = []\r\n if obs != []:\r\n for k in obs:\r\n have.append(k[0])\r\n # list of variables we need to know in order to calculate the probability\r\n needed = []\r\n for prob in range(len(cpt[1])):\r\n for j in cpt[1][prob][0]:\r\n if j != []:\r\n if j[0] not in needed:\r\n needed.append(j[0])\r\n # conditional logic based on the known variables\r\n for required in needed:\r\n if required not in have:\r\n # deep copy our observations list\r\n obs2 = []\r\n obs3 = []\r\n for observs in obs:\r\n obs2.append(observs)\r\n obs3.append(observs)\r\n # if we need to know a variable but don't have it\r\n # then we allow it to be either 0 or 1\r\n obs3.append([required,'1'])\r\n obs2.append([required,'0'])\r\n # computes probability if the unknown term is true, times \r\n # the probability that the unknown term is true, plus the\r\n # probability if the unknown term is false, times the \r\n # probability that the unknown term is false\r\n answer = (probability(cpts, [term,flip], obs3) * probability(cpts, [required,'1'], obs)) + (probability(cpts, [term,flip], obs2) * (probability(cpts, [required,'0'], obs)))\r\n # this loop looks complicated but all it's doing is finding the correct\r\n # line in the CPT\r\n if cpt[1][prob][-2][1] == '1':\r\n count = 1\r\n for i in range(len(cpt[1][prob][0])):\r\n if cpt[1][prob][0][i] in obs:\r\n count *= 1\r\n else:\r\n count = 0\r\n if count == 1:\r\n answer += cpt[1][prob][-1]\r\n\r\n\r\n # this computes the probability that the term is true, so if we asked \r\n # for the probability that it is false, just return 1 - answer\r\n if flip == '0':\r\n return 1 - answer\r\n return answer", "def probability(self, tokens):\n\n return 2 ** self.log_probability(tokens)", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n omega = self.alpha / (doc_length + self.alpha)\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-omega) * p1 + omega * p2", "def ppmi_similarity(target_word, context_word, co_occurrences, word_id, word_frequency, count):\n target_id = word_id[target_word]\n context_id = word_id[context_word]\n target_occurrences = word_frequency[target_word]\n context_occurrences = word_frequency[context_word]\n if context_id not in co_occurrences[target_id]:\n return 0\n cooccurrence_prob = co_occurrences[target_id][context_id]/target_occurrences\n target_occurrence_prob = target_occurrences/count\n context_occurrence_prob = context_occurrences/count\n pmi = math.log2(cooccurrence_prob/(target_occurrence_prob*context_occurrence_prob))\n if pmi < 0:\n return 0\n else:\n return pmi", "def estimate_probability(word, previous_n_gram, \r\n n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0):\r\n # Note : 1 . Here we are actually not considering the end token or start token as a part of a vocabulary.\r\n # 2 . 
Although the literature says we need to prepend the n-1 SOS tokens but in reality we are prepending n SOS tokens\r\n \r\n # convert list to tuple to use it as a dictionary key\r\n previous_n_gram = tuple(previous_n_gram)\r\n\r\n previous_n_gram_count = n_gram_counts.get(previous_n_gram, 0)\r\n \r\n\r\n denominator = float(previous_n_gram_count + (k*vocabulary_size))\r\n\r\n n_plus1_gram = previous_n_gram + (word,)\r\n \r\n\r\n n_plus1_gram_count = n_plus1_gram_counts.get(n_plus1_gram, 0)\r\n \r\n\r\n numerator = float(n_plus1_gram_count + k)\r\n\r\n probability = float(numerator/denominator)\r\n \r\n \r\n return probability", "def p_word_given_topic(self, word, topic, beta=0.1):\n return ((self.topic_word_counts[topic][word] + beta) /\n (self.topic_counts[topic] + self.W * beta))", "def cond_prob(self, token, prev_tokens=None):\n\n addone = self.addone\n\n # unigram case\n if not prev_tokens:\n if addone:\n result = (self.count((token,))+1) / (self.V() + self.count(()))\n else:\n result = self.count((token,)) / self.count(())\n else:\n A_set = self.A(prev_tokens)\n # check if discounting can be applied\n if token in A_set:\n result = self.count_star(tuple(prev_tokens) + (token,)) /\\\n self.count(tuple(prev_tokens))\n else:\n # recursive call\n q_D = self.cond_prob(token, prev_tokens[1:])\n denom_factor = self.denom(prev_tokens)\n if denom_factor:\n alpha = self.alpha(prev_tokens)\n result = alpha * q_D / denom_factor\n else:\n result = 0\n return result", "def _predict_doc(self, x, flag):\n\n if flag == 1:\n denom = self.X.num_positive()\n else:\n denom = self.X.num_negative()\n denom += self.X.vocab_size()\n\n # multiply word probabilities for all words in x\n words = tokenize(x)\n # prob = 1.0\n # for word in words:\n # wi = self._doc_count_for_word(word, flag=flag)\n # # utilize the Laplace Smooth\n # prob *= ((float(wi)+1.0) / (float(denom)+2.0))\n\n prob = math.log(self.X.priors[str(flag)])\n for word in words:\n wi = self._doc_count_for_word(word, flag=flag)\n # utilize the Laplace Smooth\n prob += math.log((float(wi)+1.0) / (float(denom)+2.0))\n\n # prob *= math.log(self.X.priors[str(flag)])\n\n return prob", "def prob_calculate(self, tokens):\n\n prob = 0\n for x in range(0, len(tokens) - self.order - 1):\n prompt = tuple(tokens[x:x + self.order])\n if prompt in self.transitions:\n next_token = tokens[x + self.order]\n values = self.transitions[prompt]\n prob += (values.count(next_token))/len(values)\n\n return prob", "def doc_prob(self, doc, cat):\n features = self.get_features(doc) \n # Multiply the probabilities of all the features together\n p = Decimal(1)\n for f in features:\n p *= Decimal(str(self.weighted_prob(f, cat, self.feature_prob))) \n return p", "def sub_cond_prob(self, token, prev_tokens=()):\n assert len(prev_tokens) < self._n\n if (self.count(prev_tokens) == 0):\n return 0.0\n return float(self.count(list(prev_tokens) + [token])) / float(self.count(prev_tokens))", "def compute_probabilities(text, X=alph):\n\n # Convert to lowercase (just to be sure)\n text = text.lower()\n\n # Make empty dictionary with letters as keys\n counts = {k: 0 for k in X}\n\n # Keep track of total length of legitimate characters\n total = 0\n\n # Loop through text and update counts only for alphabet\n for c in text:\n if c in X:\n total += 1\n counts[c] += 1\n\n # Normalise the counts and return\n return {k: c / total for k, c in counts.items()}", "def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n if doc_length == 0:\n p1 = 0\n else:\n p1 = 
frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-self.omega) * p1 + self.omega * p2", "def prob(self, doc, cat):\n catprob = self.category_count(cat) / self.total_count() # Pr(Category)\n docprob = self.doc_prob(doc, cat) # Pr(Document | Category)\n return docprob*Decimal(str(catprob)) # Pr(Category | Document)", "def calculate_perplexity(sentence, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0):\r\n # length of previous words\r\n n = len(list(n_gram_counts.keys())[0]) \r\n \r\n # prepend <s> and append <e>\r\n sentence = [\"<s>\"] * n + sentence + [\"<e>\"]\r\n \r\n # Cast the sentence from a list to a tuple\r\n sentence = tuple(sentence)\r\n \r\n # length of sentence (after adding <s> and <e> tokens)\r\n N = len(sentence)\r\n \r\n\r\n product_pi = 1.0\r\n \r\n \r\n # Index t ranges from n to N - 1, inclusive on both ends\r\n for t in range(n, N): \r\n\r\n # get the n-gram preceding the word at position t\r\n n_gram = sentence[t-n:t]\r\n \r\n # get the word at position t\r\n word = sentence[t]\r\n \r\n\r\n probability = estimate_probability(word, n_gram, n_gram_counts, n_plus1_gram_counts, vocabulary_size, k=1.0)\r\n \r\n\r\n product_pi *= 1/probability\r\n\r\n # Take the Nth root of the product\r\n perplexity = product_pi**(1/float(N))\r\n return perplexity", "def _prob_of_next_word(next_word, prev_word_array, review_batch):\n compare_phrase = np.append(prev_word_array, next_word)\n resized_batch = np.resize(review_batch, (len(compare_phrase)))\n count = 0\n\n for phrase in resized_batch:\n if np.array_equal(phrase, compare_phrase):\n count += 1\n\n return count / (resized_batch.shape[0] * resized_batch.shape[1])", "def word_selection(dictionary):\n total_sum = 0 # {'two': 4, 'red': 4} total_sum == 8\n cumulative_prob = 0.0\n\n for item in dictionary:\n total_sum += dictionary[item]\n\n random_num = random.uniform(0, 1)\n for value in dictionary:\n cumulative_prob += float(dictionary[value]) / float(total_sum)\n if cumulative_prob >= random_num:\n return value", "def get_emissions_probability(label_matches, given_tag, given_word, tag_counts):\r\n\tlookup_tuple = (given_word, given_tag)\r\n\tword_tag_frequency = label_matches.get(lookup_tuple, 0)\r\n\ttag_frequency = tag_counts[given_tag]\r\n\tif tag_frequency == 0:\r\n\t\temissions_probability = 0\r\n\telse:\r\n\t\temissions_probability = float(word_tag_frequency)/float(tag_frequency)\r\n\treturn emissions_probability", "def posterior_else(self, word, previous, previous_second):\r\n prob = {}\r\n if word not in prob.keys():\r\n\r\n prob[word] = {\r\n pos: self.emission_probability[pos][word]\r\n * (\r\n float(\r\n self.transition_probability[previous][pos]\r\n * self.posterior_probability[previous]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * (\r\n float(\r\n self.transition_probability[previous_second][pos]\r\n * self.posterior_probability[previous_second]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * self.posterior_probability[pos]\r\n if word in self.emission_probability[pos]\r\n else (1 / float(10 ** 10))\r\n * (\r\n float(\r\n self.transition_probability[previous][pos]\r\n * self.posterior_probability[previous]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * (\r\n float(\r\n self.transition_probability[previous_second][pos]\r\n * self.posterior_probability[previous_second]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * self.posterior_probability[pos]\r\n for pos in self.position_list\r\n }\r\n\r\n return 
prob[word]", "def perplexity(model, data):\n probs = [model.get_prob(word) for word in data] # get word's probability\n probs_log = [\n log2(word_prob) if word_prob > 0 else log2(float_info.epsilon)\n for word_prob in probs\n ] # log the probabilities. using epsilon when the probability is 0\n sum_probs = reduce(lambda a, b: a + b, probs_log) # sum all\n power_val = (-1 * sum_probs) / len(probs_log) # divide by n and neg all\n return 2 ** power_val", "def predict_next(self, seq):\n context = tuple(seq[-2:]) # last two words\n pc = self.probas[context] # conditional distribution\n words, probs = zip(*pc.items()) # convert to list\n return np.random.choice(words, p=probs)", "def post_second(self, word, previous):\r\n prob = {}\r\n if word not in prob.keys():\r\n\r\n prob[word] = {\r\n pos: self.emission_probability[pos][word]\r\n * (\r\n float(\r\n self.transition_probability[previous][pos]\r\n * self.posterior_probability[previous]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * self.posterior_probability[pos]\r\n if word in self.emission_probability[pos]\r\n else (1 / float(10 ** 10))\r\n * (\r\n float(\r\n self.transition_probability[previous][pos]\r\n * self.posterior_probability[previous]\r\n )\r\n / self.posterior_probability[pos]\r\n )\r\n * self.posterior_probability[pos]\r\n for pos in self.position_list\r\n }\r\n\r\n return prob[word]", "def score(self, word, context=None):\n return self.unmasked_score(\n self.vocab.lookup(word), self.vocab.lookup(context) if context else None\n )", "def perplexity(self, corpus):\n M = 0\n prob = 0\n\n for line in corpus:\n M += len(line)\n M += 1 # consider \"STOP\"\n prob += self.sentence_logprob(line)\n result = 2**(-(prob/M))\n\n return result", "def prob(self, x, y):\n p = self.tag_prob(y)\n for i in range(len(y)):\n p *= self.out_prob(x[i], y[i])\n\n return p", "def calc_prob(wds, dic, neg, pos):\n tot = neg + pos\n p_neg = float(neg) / tot\n p_pos = float(pos) / tot\n ct_neg = sum(dic[\"neg\"].values())\n ct_pos = sum(dic[\"pos\"].values())\n V_neg = len(dic[\"neg\"])\n V_pos = len(dic[\"pos\"])\n V = V_neg + V_pos\n cstar_neg = log(p_neg)\n cstar_pos = log(p_pos)\n\n for term in wds:\n\n # if word from test doc is in training doc dictionary\n # under class \"neg\" compute this smoothed probability\n if term in dic[\"neg\"]:\n\n p_wi_neg = float(dic[\"neg\"][term] + 1) / (ct_neg + V + 1)\n\n # otherwise compute this smoothed probability\n else:\n\n p_wi_neg = 1.0 / (ct_neg + V + 1)\n\n # add to the cstar_neg variable\n cstar_neg += (wds[term] * log(p_wi_neg))\n\n # if word from test doc is in training doc dictionary\n # under class \"pos\" compute this smoothed probability\n if term in dic[\"pos\"]:\n\n p_wi_pos = float(dic[\"pos\"][term] + 1) / (ct_pos + V + 1)\n\n # otherwise compute this smoothed probability\n else:\n\n p_wi_pos = 1.0 / (ct_pos + V + 1)\n\n # add to the cstat_pos variable\n cstar_pos += (wds[term] * log(p_wi_pos))\n\n # return a tuple of the two probabilities\n return cstar_neg, cstar_pos", "def tf(word, document):\n return freq(word,document) / wordCount(document)", "def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total", "def estimate_pxy(x,y,label,smoothing,vocab):\n log_probabilities = defaultdict(float)\n corpus_counts = get_corpus_counts(x, y, label)\n total = sum(corpus_counts.values())\n for word in vocab:\n 
log_probabilities[word] = np.log(((corpus_counts[word] if word in corpus_counts else 0) + smoothing) / (total + len(vocab) * smoothing))\n return log_probabilities", "def out_prob(self, word, tag):\n return self._out.get(tag, {}).get(word, 0)", "def token_percentage(word, text):\n word_count = text.count(word)\n text_len = len(text)\n return percentage(word_count, text_len)", "def tag_prob(self, y):\n p = 1\n n = self._n\n y = (START_TAG,) * (n - 1) + tuple(y) + (END_TAG,)\n for i in range(len(y) - self._n + 1):\n tag = y[i + n - 1]\n prev_tags = y[i:i + n - 1]\n p *= self.trans_prob(tag, prev_tags)\n\n return p", "def sentence_probability(self, sentence, ngram_type=1):\n sentence_word_list = sentence.lower().split()\n prob = 0 # sentence probability\n if ngram_type == 1:\n for i in range(len(sentence_word_list)):\n prob = prob + self.n_gram_MLE(sentence_word_list[i], gram_type=1)\n\n if ngram_type == 2:\n for i in range(len(sentence_word_list)):\n if i >= len(sentence_word_list) - 1:\n break\n prob = prob + self.n_gram_MLE(sentence_word_list[i],\n sentence_word_list[i] + ' ' + sentence_word_list[i + 1], gram_type=2)\n\n if ngram_type == 3:\n for i in range(len(sentence_word_list)):\n if i >= len(sentence_word_list) - 2:\n break\n prob = prob + self.n_gram_MLE(sentence_word_list[i] + ' ' + sentence_word_list[i + 1],\n sentence_word_list[i] + ' ' + sentence_word_list[i + 1] + ' ' +\n sentence_word_list[i + 2], gram_type=3)\n\n if ngram_type == 4:\n for i in range(len(sentence_word_list)):\n if i >= len(sentence_word_list) - 3:\n break\n prob = prob + self.n_gram_MLE(sentence_word_list[i] + ' ' + sentence_word_list[i + 1] + ' ' +\n sentence_word_list[i + 2],\n sentence_word_list[i] + ' ' + sentence_word_list[i + 1] + ' ' +\n sentence_word_list[i + 2] + sentence_word_list[i + 3], gram_type=4)\n\n if ngram_type == 5:\n for i in range(len(sentence_word_list)):\n if i >= len(sentence_word_list) - 4:\n break\n prob = prob + self.n_gram_MLE(sentence_word_list[i] + ' ' + sentence_word_list[i + 1] + ' ' +\n sentence_word_list[i + 2] + sentence_word_list[i + 3],\n sentence_word_list[i] + ' ' + sentence_word_list[i + 1] + ' ' +\n sentence_word_list[i + 2] + sentence_word_list[i + 3] +\n sentence_word_list[i + 4], gram_type=5)\n\n return prob", "def get_word_context(word):\r\n\tfor content, profile in word_context_profile:\r\n\t\tif word == content:\r\n\t\t\treturn profile \r\n\treturn 0", "def preprocessing():\n english_dictionary = nltk.corpus.brown.words()\n slang_vocab = pickle.load(open('vocab_pattern_match_with_freq.pkl', 'rb'))\n\n normalize_english_dict = len(english_dictionary)\n normalize_slang_vocab = 0\n for w, n in slang_vocab.items():\n normalize_slang_vocab += n\n\n words = {}\n for w, n in Counter(english_dictionary).items():\n words[w] = n/normalize_english_dict\n \n for w, n in slang_vocab.items():\n if w not in words:\n words[w] = 0.\n words[w] += n/normalize_slang_vocab\n\n words_by_freq = [w for w,_ in sorted(words.items(), key=lambda x: x[1], reverse=True)]\n\n # Build a cost dictionary, assuming Zipf's law and cost = -math.log(probability).\n #words = open(\"words_by_frequency.txt\").read().split()\n wordcost = dict((k, log((i+1)*log(len(words_by_freq)))) for i,k in enumerate(words_by_freq))\n maxword = max(len(x) for x in words_by_freq)\n return wordcost,maxword", "def cond_prob(self, token, prev_tokens=()):\n n = self._n\n lambdas = [0] * n\n res = 0\n for i in range(0, n - 1):\n lambdas[i] = (1 - sum(lambdas[:i])) * float(self.count(prev_tokens[i:])) / (\n 
float(self.count(prev_tokens[i:])) + self._gamma)\n res = res + lambdas[i] * self.sub_cond_prob(token, prev_tokens[i:])\n lambdas[n - 1] = (1 - sum(lambdas))\n assert (sum(lambdas) == 1.0)\n if (self._addone):\n return float(res + lambdas[n - 1] * self.sub_cond_prob_addone(token))\n else:\n return float(res + lambdas[n - 1] * self.sub_cond_prob(token))", "def cond_prob(self, event, context):\n if self.margin[context] > self.k:\n return super().cond_prob(event, context)\n else:\n return self.backoff.cond_prob(event, context[1:])", "def perplexity(self, text_ngrams):\n return pow(\n 2.0, self.entropy(progress(text_ngrams, desc=\"Calculating Perplexity\") if self.verbose else text_ngrams)\n )", "def predict(self, sentence, smoothing=None):\n words = sentence.split()\n words.append(\"STOP\")\n probability = 1.0\n\n words = [self.START_SYMBOL, self.START_SYMBOL] + words\n ###################\n # Compute the probability of a sentence under the trigram model\n # p(x1,..,xn)= \\prod {q(x_i| x_{i-2}x_{i-1}}\n for i in xrange(len(words)-2):\n probability *= self.trigram_prob(words[i], words[i+1], words[i+2])\n\n return probability", "def log_probability_of_sentence(self, tokens: Sequence[str]):\n if isinstance(tokens, str):\n raise ValueError(\"Input to log_probability_of_sentence is a sequence of token strings,\"\n \" not a single string\")\n # these don't matter when we are running the model in inference mode\n targets = np.zeros([_BATCH_SIZE, _NUM_TIMESTEPS], np.int32)\n weights = np.ones([_BATCH_SIZE, _NUM_TIMESTEPS], np.float32)\n\n # these contain information about the previous word\n # we initialize them with the beginning-of-sentence marker\n inputs = np.zeros([_BATCH_SIZE, _NUM_TIMESTEPS], np.int32)\n inputs[0, 0] = self._vocab.word_to_id(_START_SENTENCE_SYMBOL)\n\n char_ids_inputs = np.zeros(\n [_BATCH_SIZE, _NUM_TIMESTEPS, self._vocab.max_word_length], np.int32)\n char_ids_inputs[0, 0, :] = self._vocab.word_to_char_ids(_START_SENTENCE_SYMBOL)\n\n # we take the log probability of a token sequence to be the sum of the log-probs\n # of each of its tokens given the preceding context\n log_prob_sum = 0.0\n for token in tokens:\n with contexttimer.Timer() as token_timer:\n dist_over_next_words = self._session.run(\n self._name_to_node['softmax_out'],\n feed_dict={\n self._name_to_node['char_inputs_in']: char_ids_inputs,\n self._name_to_node['inputs_in']: inputs,\n self._name_to_node['targets_in']: targets,\n self._name_to_node['target_weights_in']: weights})\n token_idx = self._vocab.word_to_id(token)\n log_prob_sum += math.log(dist_over_next_words[0][token_idx])\n\n # prepare this word to be the context for the next word\n inputs[0, 0] = token_idx\n char_ids_inputs[0, 0, :] = self._vocab.word_to_char_ids(token)\n\n # restore original state so that future calls to log_probability_of_sentence\n # are not affected by past calls\n self._reset_state()\n\n return log_prob_sum", "def conditional_prob(self, label, datapoint):\r\n\r\n # REPLACE THE COMMAND BELOW WITH YOUR CODE\r\n feat_vec = self.x[datapoint]\r\n\r\n if label == 1:\r\n return self.conditional_prob_1(feat_vec)\r\n\r\n return 1 - self.conditional_prob_1(feat_vec)", "def percent_using_relevant_words_by_context_and_question(self):\n total_student_count = self.get_number_of_unique_students()\n\n question_context_count_list = self.students_using_relevant_words_by_context_and_question()\n\n question_context_percent_list = []\n for item in question_context_count_list:\n question_context_percent_list.append((item[0], item[1], item[2] / 
total_student_count))\n\n return question_context_percent_list", "def get_prob(cls, word, **given):\n fields = 'pos phr lmk rel deg'\n params = dict((f, None) for f in fields.split())\n params.update(given)\n return cls.query.filter_by(word=word, **params).one()", "def calc_trans_prob(corpus_context, bigram, word_boundaries, direction, mode = 'segMode', call_back = None):\n\n if call_back is not None:\n call_back(\"Generating probabilities...\")\n call_back(0,0)\n cur = 0\n\n if mode == 'segMode':\n is_in_corpus = corpus_context.get_frequency_base()\n try:\n is_in_corpus[bigram[0]]\n except KeyError:\n raise TPError('The segment %s was not found in the corpus.' % bigram[0])\n try:\n is_in_corpus[bigram[1]]\n except KeyError:\n raise(TPError('The segment %s was not found in the corpus.' % bigram[1]))\n\n if word_boundaries == 'Word-end only':\n bigram_dict = corpus_context.get_frequency_base(gramsize=2, halve_edges=True, probability=True, need_wb=True)\n elif word_boundaries == 'Both sides':\n bigram_dict = corpus_context.get_frequency_base(gramsize=2, halve_edges=False, probability=True, need_wb=True)\n else:\n bigram_dict = corpus_context.get_frequency_base(gramsize=2, halve_edges=False, probability=True, need_wb=False)\n\n\n try:\n prob_bg = bigram_dict[bigram]\n except KeyError:\n raise TPError('The string \\'%s\\' was not found in the corpus.' % ''.join(bigram))\n\n if prob_bg == 0.0:\n raise TPError('Transitional probability cannot be calculated for ' + ''.join(bigram))\n\n # Find the probability of a_ and _b bigrams for a bigram (a, b)\n in_context_prob = 0\n if direction == 'forward':\n in_context_prob = sum([bigram_dict[pair] if pair[0] == bigram[0] else 0 for pair in bigram_dict])\n elif direction == 'backward':\n in_context_prob = sum([bigram_dict[pair] if pair[1] == bigram[1] else 0 for pair in bigram_dict])\n\n return prob_bg / in_context_prob\n\n # Calculation using syllables. WIP.\n else:\n # create an unspecified syllable, find the number of all syllables in corpus\n bigram_filter = SyllableEnvironmentFilter(corpus_context.inventory, bigram[1].middle, lhs=bigram[0].middle)\n unspecified_dict = {'contents':[], 'search_type':'Minimally contains'}\n unspecified_syll = {'onset':unspecified_dict, 'nucleus':unspecified_dict, 'coda':unspecified_dict,\n 'stress':set(list(corpus_context.inventory.stress_types.keys()) + ['None']),\n 'tone':set(list(corpus_context.inventory.tone_types.keys()) + ['None']), 'nonsegs':set()}\n num_syllables = len(corpus_context.corpus.inventory.syllables)\n\n # All the words that contain the two syllables in sequence\n with_combined_context = search_for_syll_context(corpus_context.corpus, bigram_filter)\n with_combined_prob = len(with_combined_context) / num_syllables\n\n if with_combined_prob == 0.0:\n raise TPError('The sequence \\'%s\\' was not found in the corpus.' 
% str(bigram_filter))\n\n # all the words that contain the first syllable followed by any syllable\n first_context_filter = SyllableEnvironmentFilter(bigram_filter.inventory, bigram_filter.lhs, rhs=[unspecified_syll])\n with_first_context = search_for_syll_context(corpus_context.corpus, first_context_filter)\n with_first_prob = len(with_first_context) / num_syllables\n\n # the words that contain any syllable followed by the second syllable\n second_context_filter = SyllableEnvironmentFilter(bigram_filter.inventory, bigram_filter.middle, lhs=[unspecified_syll])\n with_second_context = search_for_syll_context(corpus_context.corpus, second_context_filter)\n with_second_prob = len(with_second_context) / num_syllables\n\n # find transitional probability\n if direction == 'forward':\n return with_combined_prob / with_first_prob\n elif direction == 'backward':\n return with_combined_prob / with_second_prob", "def prob_block(sentence, pos_parser=pos):\n\ttry:\n\t\tsentence = unicode(sentence)\n\t\tparsed_data = pos_parser(sentence)\n\t\tfor span in parsed_data.sents:\n\t\t\tsent = [parsed_data[i] for i in range(span.start, span.end)]\n\t\tnon_verbs = np.sum([token.pos_ != 'VERB' for token in sent])\n\t\ttotal = len(sent)\n\t\t#print str(sentence) +\"--\"+ str(float(non_verbs) / total)\n\t\treturn float(non_verbs) / total\n\texcept Exception,e:\n\t\treturn 0", "def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)", "def cond_prob(self, token, prev_tokens=None):\n\n addone = self.addone\n n = self.n\n gamma = self.gamma\n\n if not prev_tokens:\n prev_tokens = []\n assert len(prev_tokens) == n - 1\n\n lambdas = []\n for i in range(0, n-1):\n # 1 - sum(previous lambdas)\n aux_lambda = 1 - sum(lambdas)\n # counts for numerator\n counts_top = self.count(tuple(prev_tokens[i:n-1]))\n # counts plus gamma (denominator)\n counts_w_gamma = self.count(tuple(prev_tokens[i:n-1])) + gamma\n # save the computed i-th lambda\n lambdas.append(aux_lambda * (counts_top / counts_w_gamma))\n # last lambda, by hand\n lambdas.append(1-sum(lambdas))\n\n # Maximum likelihood probs\n ML_probs = dict()\n for i in range(0, n):\n hits = self.count((tuple(prev_tokens[i:])+(token,)))\n sub_count = self.count(tuple(prev_tokens[i:]))\n result = 0\n if addone and not len(prev_tokens[i:]):\n result = (hits+1) / (float(sub_count) + len(self.voc))\n else:\n if sub_count:\n result = hits / float(sub_count)\n # the (i+1)-th element in ML_probs holds the q_ML value\n # for a (n-i)-gram\n ML_probs[i+1] = result\n\n prob = 0\n # ML_probs dict starts in 1\n for j in range(0, n):\n prob += ML_probs[j+1] * lambdas[j]\n return prob", "def sentiment_score(text, loaded_model = loaded_model, vectorizer = tokenizer):\n # tweet_tf_idf = vect_char.transform(text)\n tweet_token = tokenizer.texts_to_sequences(text)\n tweet_token = pad_sequences(tweet_token, maxlen = 40)\n sentiment = loaded_model.predict_proba(tweet_token)\n neg_prob = sentiment[0][0]\n pos_prob = sentiment[0][1]\n return neg_prob, pos_prob", "def eval_text(self, text):\n # Pre-process sentence given\n sents = text.split('\\n')\n words = []\n for sent in sents:\n words.extend(list(sent))\n\n for idx, word in enumerate(words):\n if (word, ) not in self.uni_dist:\n words[idx] = TOKENS[\"UNK\"]\n\n # Compute Log-Probablities\n log_prob = 0\n for ngram in nltk.ngrams(words, self.N):\n log_prob += 
self.eval_ngram(ngram)\n\n # Compute Perplexity\n num_words = len(words)\n perplexity = 2 ** ((-1 / num_words) * log_prob)\n\n return perplexity", "def sentence_logprob(self, sentence):\n grams = get_ngrams(sentence, 3)\n p = 1\n\n for gram in grams:\n p *= np.longfloat(self.smoothed_trigram_probability(gram))\n\n return np.log2(p)", "def profanityCheck(text):\n return predict_prob([text])[0]", "def word_to_perplexity(model, timeseries, indices, words, args):\n accum = 0\n words_len = len(words)-args.window_size\n batches = math.floor(words_len / args.batch_size)\n print(batches)\n for start in range(0, batches):\n idx = start*args.batch_size\n inp = np.array([timeseries[i:i+args.window_size] for i in range(idx, idx+args.batch_size)])\n label = np.asarray([indices[x] for x in words[idx+args.window_size:idx+args.window_size+args.batch_size]]) \n \n pred = model.predict(inp, batch_size=128)\n lp = np.log(pred)\n for i, ent in enumerate(lp):\n accum += ent[label[i]]\n if start % 5 == 0:\n print(\"{} / {}. Perplexity so far: {}\".format(start, batches, np.exp(-accum / (start*args.batch_size+1))))\n accum = -accum\n print(accum)\n avg = accum / words_len \n print(avg)\n perplex = np.power(avg, 2)\n print(perplex)", "def calc_prob(number_of_strings, GC_content, DNA):\r\n\r\n AT = 0\r\n GC = 0\r\n\r\n for nt in DNA:\r\n if nt == \"A\" or nt == \"T\":\r\n AT += 1\r\n elif nt == \"G\" or nt == \"C\":\r\n GC += 1\r\n\r\n #P(at least 1 match of s) = 1 − P(no matches out of N strings) = 1 − [1 - P_no_match]^N\r\n\r\n P_no_match = (((1 - GC_content)/2) **AT) * ((GC_content/2) **GC)\r\n prob = 1 - (1-P_no_match) **number_of_strings\r\n\r\n print(\"%0.3f\" %prob)", "def pred_prob(hp, ss, y):\n K = len(ss['counts'])\n N = sum(ss['counts'])\n assert y >= 0 and y <= K\n if y < K:\n return log((ss['counts'][y] - hp['d']) / (hp['alpha'] + N))\n elif y == K:\n return log((hp['alpha'] + hp['d'] * K) / (hp['alpha'] + N))", "def choose(raw_freq):\n\t\t# Build a map of accumulated frequencies to words\n\t\tacc = itertools.accumulate(raw_freq.values())\n\t\tlookup = jaraco.collections.RangeMap(zip(acc, raw_freq))\n\n\t\t# choose a random word proportional - to do that, pick a\n\t\t# random index from 1 to the total.\n\t\t_, total = lookup.bounds()\n\t\treturn lookup[random.randint(1, total)]", "def context_counts(self, context):\n return (\n self.counts[len(context) + 1][context] if context else self.counts.unigrams\n )", "def prob(seq, model):\n if seq in model:\n\n return (model[seq][0], len(seq))\n elif len(seq) == 1: #this is an OOV, it isn't in the model, and is one long\n return (model[(\"<unk>\",)][0],0) #return 0 for order if OOV\n elif seq[:len(seq)-1] in model:\n\n pr=prob(seq[1:], model)\n return (model[seq[:len(seq)-1]][1] + pr[0], pr[1])\n else:\n\n return prob(seq[1:], model)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def trigram_prob(self,u,v,w):\n ###################\n # Use the trigram_counts to get q(w|u,v)\n choices = self.trigram_counts[(u,v)]\n total = sum(choices.values())+sum(self.word_counts.values())\n\n trigram_probability = (self.trigram_counts[(u,v)][w]+1)/float(total)\n\n return trigram_probability", "def __dis_context__(self, context, word):\n senses = self.vs.get_senses(word, self.ignore_case)\n if self.verbose:\n print(\"Senses of a target word:\")\n print(senses)\n\n if len(senses) == 0: # means we don't know any sense for this word\n return None\n\n # collect context vectors\n vctx = [self.vc[c] for c in 
context]\n\n if len(vctx) == 0: # means we have no context\n return None\n # TODO: better return most frequent sense or make random choice\n\n # filter context vectors, if aplicable\n if self.filter_ctx >= 0:\n vctx = self.__filter__(vctx, senses, self.filter_ctx)\n\n if self.ctx_method == 'prob':\n avg_context = np.mean(vctx, axis=0)\n scores = [self.__logprob__(avg_context, self.vs[sense]) for sense, prob in senses]\n\n elif self.ctx_method == 'sim':\n avg_context = np.mean(vctx, axis=0)\n scores = [self.__cosine_sim__(avg_context, self.vs[sense]) for sense, prob in senses]\n if self.verbose:\n print(\"Sense probabilities:\")\n print(scores)\n\n else:\n raise ValueError(\"Unknown context handling method '%s'\" % self.ctx_method)\n\n # return sense (word#id), scores for senses\n return senses[np.argmax(scores)][0], scores", "def get_most_probable_sentence(\n self,\n suggestions: List[List[str]]\n ) -> str:\n sent_word_count = len(suggestions)\n suggestions = [[tok] for tok in ContextModel.START_TOKENS] + suggestions + \\\n [[tok] for tok in ContextModel.END_TOKENS]\n memory = [[MemoryItem(score=0.0, decoded=tuple())], [MemoryItem(score=0.0, decoded=tuple())]]\n for t in range(2, len(suggestions)):\n memory.append([])\n for i, word in enumerate(suggestions[t]):\n mx_score, pick_1, pick_2 = 0, 0, 0\n for j, suggestion_1 in enumerate(suggestions[t - 1]):\n for k, suggestion_2 in enumerate(suggestions[t - 2]):\n curr_score = memory[-3][k].score \\\n + self.model_dict.get((suggestion_2, suggestion_1), self.default_prob) \\\n + self.model_dict.get((suggestion_1, word), self.default_prob) \\\n + self.model_dict.get((suggestion_2, word), self.default_prob)\n if curr_score > mx_score:\n mx_score, pick_1, pick_2 = curr_score, j, k\n memory_item = MemoryItem(score=mx_score, decoded=memory[-3][pick_2].decoded + (pick_2, pick_1,))\n memory[-1].append(memory_item)\n memory = memory[1:]\n\n decoded = ' '.join([suggestions[t][i] for t, i in enumerate(memory[-1][0].decoded[-sent_word_count:],\n start=2)])\n # score = memory[-1][0].score\n return decoded", "def calculate_perplexity_new(self, sentence_probability, input_y, words_to_idx):\n \n batch_size = sentence_probability.shape[0]\n sentence_length = sentence_probability.shape[1]\n perplexity = np.zeros(batch_size)\n pad_index = words_to_idx['<pad>'] # index of pad\n \n for sentence_i in range(batch_size):\n word_index = 1 # start at 1 to discard bos\n log_sum = 0\n while ((word_index < sentence_length) and (input_y[sentence_i][word_index] != pad_index)): # stop when the first pad token is reached\n \n log_sum += np.log2(sentence_probability[sentence_i, word_index, \n input_y[sentence_i][word_index]])\n word_index += 1 \n word_index -= 1 # remove one count because of discarded bos\n try:\n # catch sentence with all pad's\n perplexity[sentence_i] = np.power(2, -(log_sum/word_index))\n except:\n print(input_y[sentence_i])\n print('sencentece with just pads')\n perplexity[sentence_i] = np.nan \n \n return perplexity", "def get_probs(self, *vars):\n freqs = self.freq_counts([self.data.get_column_view(v)[0] for v in vars], [len(v.values) for v in vars])\n k = np.prod([len(v.values) for v in vars])\n return (freqs + self.alpha) / (np.sum(freqs) + self.alpha*k)", "def extract_probs(label, x):\n\tb = np.ascontiguousarray(x).view(np.dtype((np.void, x.dtype.itemsize * x.shape[1])))\n\tunique_array, unique_indices, unique_inverse_x, unique_counts = \\\n\t\tnp.unique(b, return_index=True, return_inverse=True, return_counts=True)\n\tunique_a = 
x[unique_indices]\n\tb1 = np.ascontiguousarray(unique_a).view(np.dtype((np.void, unique_a.dtype.itemsize * unique_a.shape[1])))\n\tpxs = unique_counts / float(np.sum(unique_counts))\n\tp_y_given_x = []\n\tfor i in range(0, len(unique_array)):\n\t\tindexs = unique_inverse_x == i\n\t\tpy_x_current = np.mean(label[indexs, :], axis=0)\n\t\tp_y_given_x.append(py_x_current)\n\tp_y_given_x = np.array(p_y_given_x).T\n\treturn p_y_given_x, b1, b, unique_a, unique_inverse_x, pxs" ]
[ "0.8106743", "0.75592023", "0.74971807", "0.7463757", "0.7168955", "0.7168955", "0.7081482", "0.707541", "0.69993883", "0.69632804", "0.6952478", "0.6929444", "0.6909919", "0.67921746", "0.6687317", "0.6680557", "0.66741383", "0.66102266", "0.6583568", "0.6526557", "0.64608645", "0.64588284", "0.6452997", "0.64493877", "0.64414537", "0.6421439", "0.64033943", "0.6386405", "0.6361484", "0.6350575", "0.6321615", "0.63114685", "0.63092643", "0.62527037", "0.6248438", "0.62471145", "0.62331665", "0.62036926", "0.61936635", "0.6191625", "0.6182498", "0.61803997", "0.6174826", "0.61263406", "0.61173487", "0.6112766", "0.610353", "0.6103021", "0.60775447", "0.6052952", "0.60481143", "0.6042462", "0.60326546", "0.6006503", "0.5993707", "0.59710413", "0.5969677", "0.5959055", "0.59539825", "0.59509957", "0.59494907", "0.59268916", "0.5918864", "0.5897941", "0.5884773", "0.58776283", "0.58682394", "0.583098", "0.5829735", "0.58233434", "0.5817829", "0.5806836", "0.5801246", "0.57872844", "0.5787204", "0.57859", "0.577263", "0.57650447", "0.5764925", "0.57497984", "0.5734511", "0.57295483", "0.5729538", "0.5726139", "0.5709285", "0.5696799", "0.56888497", "0.5682635", "0.56748945", "0.5660781", "0.56578493", "0.56577265", "0.5655033", "0.5650508", "0.5641262", "0.56408215", "0.56245244", "0.56237537", "0.5623411", "0.5622184" ]
0.8385041
0
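Editorial note (illustration only, not a dataset row): the positive document and most of the hard negatives in the record above reduce to the same formula, perplexity = 2 ** ((-1 / N) * sum_i log2 p(w_i)). A minimal, self-contained Python sketch of that computation follows; the unigram probability table and the sample sentence are hypothetical stand-ins for a trained language model.

import math

probs = {"the": 0.2, "cat": 0.1, "sat": 0.05, "<unk>": 0.01}  # hypothetical unigram model
words = ["the", "cat", "sat"]

# Sum the log2 probabilities, then apply the same formula used in the snippets above.
log_prob = sum(math.log2(probs.get(w, probs["<unk>"])) for w in words)
perplexity = 2 ** ((-1 / len(words)) * log_prob)
print(round(perplexity, 2))  # 10.0 -- the inverse geometric mean of the three probabilities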
Number of (nonbackground) categories. Returns int Number of (nonbackground) categories.
def num_class(self): return self._num_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_of_categories() -> List[int]:\n\n return [len(cats) for cats in wiki_data[\"categories\"]]", "def category_count(self, cat):\n res = self.con.execute('select count from cc where category=\"%s\"'\n %(cat)).fetchone()\n if res == None:\n return 0\n else:\n return float(res[0])", "def get_category_count(self, category):\r\n if category in self.category_count:\r\n return float(self.category_count[category])\r\n else:\r\n return 0.0", "def nb_leafy_rameau_cat(x, cat):\r\n nb = 0\r\n for y in Sons(x):\r\n if Class(y) == 'I':\r\n if cat == 'small':\r\n nb +=1\r\n try:\r\n if type_uc(Sons(y)[0]) == cat:\r\n nb +=1\r\n except IndexError:\r\n continue\r\n elif type_uc(y) == cat:\r\n nb += 1\r\n return nb", "def get_categories_group_count():\n categoriesByArticle = RedisHelper.get_cache(KEY_CATEGORIES_COUNT_BY_ARTICLE)\n if RedisHelper.is_cache_exist(KEY_CATEGORIES_COUNT_BY_ARTICLE) is False:\n categoriesByArticle = list(Comment.objects.raw(SQL_GET_CATEGORY_COUNTS_BY_BLOG))\n RedisHelper.create_cache(KEY_CATEGORIES_COUNT_BY_ARTICLE, categoriesByArticle, RedisTimeOut.REDIS_TIMEOUT_1_DAYS)\n return categoriesByArticle", "def num_of_channels(self) -> int:\n return len(self.non_zero_channels())", "def retrieve_cat_pages_nb(self, json_data):\n return round(int(json_data[\"count\"]) / json_data[\"page_size\"]) + 1", "def n_count(category):\r\n sql = text('''\r\n WITH uniq AS (\r\n SELECT COUNT(app.id) FROM task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id)\r\n SELECT COUNT(*) FROM uniq\r\n ''')\r\n\r\n results = db.engine.execute(sql, category=category)\r\n count = 0\r\n for row in results:\r\n count = row[0]\r\n return count", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def n_channels(self):\n return len(self.channels)", "def get_class_count(Y_category):\n # Assertions\n assert isinstance(Y_category, np.ndarray), \\\n 'Input must be a numpy ndarray.'\n cls, counts = np.unique(Y_category, return_counts = True)\n cls_counts = dict(zip(cls, counts))\n\n return cls_counts", "def num_of_classes(self):\n return len(self.classes_())", "def num_of_classes(self):\n return len(self.classes_())", "def nlevels(self) -> int:\n return len(self._levels)", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def get_num_cat(sample_by_cat, samples_in_otus):\r\n num_cat = defaultdict(int)\r\n for cat, samples in sample_by_cat.items():\r\n num_samples = len(set(samples_in_otus) & set(samples))\r\n num_cat[cat[0]] += (num_samples * (num_samples - 1)) / 2\r\n return num_cat", "def n_channels(self):\n return self._n_channels", "def Nlevels(self):\n return self._nlevels", "def n_levels(self):\n return self.primary_header['Number of levels']", "def n_levels(self):\n return len(self.scales)", "def constituent_count(self):\n return self._constituent_count", "def num_classes(self):\n return len(self.classes)", "def get_number_of_classes(self):\n return len(self.class_dict.keys())", "def count(self):\n # TODO not implemented yet\n return 0", "def num_animals(self):\n return self._num_herbs + self._num_carns", "def get_num_instances(im, non_building_labels):\n return np.setdiff1d(im, non_building_labels)", "def dimension_count(self):\n return 
self._dimensionCount", "def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']", "def n_cs(self):\n return np.size(self._cs, 0)", "def get_num_labels(self):\n return self.num_labels", "def num_labels(self):\n return len(self.get_labels())", "def num_labels(self):\n return len(self.get_labels())", "def get_num_channels():\r\n check_mixer()\r\n return sdl.Mix_GroupCount(-1)", "def getNbClusters( model):\r\n\r\n\tlabels = model.labels_\r\n\tlabelValues = []\r\n\tfor label in labels:\r\n\t\tif label not in labelValues and label != -1: labelValues.append(label)\r\n\tnbClusters = len( labelValues)\r\n\treturn nbClusters", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def get_count(self):\n return len(self._tags)", "def count(self):\n return len(self.wallpapers)", "def count_containers(lines: Lines) -> int:\n rules = parse_rules(lines)\n allowed_containers = containers(\"shiny gold\", rules)\n assert allowed_containers is not None\n return len(allowed_containers) - 1", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def num_classes(self):\n\t\treturn len(self.classes)", "def carn_count(self):\n return len(self.carnivores)", "def num_classes(self):\n return self._num_classes", "def num_classes(self):\n\t\t\treturn len(self.classes)", "def count_categories_for_party(party_id: PartyID) -> int:\n return DbCategory.query \\\n .for_party(party_id) \\\n .count()", "def count(self):\n\t\treturn len(list(self.nodes))", "def getNumCleanedTiles(self):\n counter = 0\n for tile in self.tiles:\n if self.tiles[tile] == 'clean':\n counter += 1\n return counter", "def get_num_of_containers(self):\n Container.num_of_cntnrs = len(Container.containers)\n return self.num_of_cntnrs", "def get_num_channels(self):\r\n check_mixer()\r\n return sdl.Mix_GroupCount(self._chunk_tag)", "def n_clusters(self):\n return len(self.clusters)", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def num_classes_a(self):\r\n return self._num_classes_a", "def get_feature_count(self, feature, category):\r\n if feature in self.feature_count and category in self.feature_count[feature]:\r\n return float(self.feature_count[feature][category])\r\n else:\r\n return 0.0", "def count(self):\n \n return len(self.img_lst)", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)", "def get_num_countries(self):\n return len(self.countries)", "def is_category(series):\n # TODO: Codelists could have 50+ codes\n # TODO: In the absence of feather dtypes, how to differentiate cat/cont?\n # TODO: Or just group and report the top n?\n return len(series.unique()) <= 10", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t 
\"Recommended\": 15\n }\n return cat", "def countSites(self):\n self.ni = len(self.sites)\n return self.ni", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def count(self):\n return self.__tree.node_count", "def axis(self):\n return len(self._colors)", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def count_compounds(self) -> int:\n return self._count_model(Compound)", "def num_channels(self):\n with audioread.audio_open(self.path) as f:\n return f.channels", "def count_labels(self, add_no_ne_label=False):\n return sum([count[1] for count in self.get_label_counts(add_no_ne_label=add_no_ne_label)])", "def count(self) -> int:\n return self.__count", "def count(self):\n return int()", "def count(self):\n return int()", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def eventcount(self):\n return self.serviceinstance_set.aggregate(Count('service__category', distinct=True))['service__category__count']", "def number_of_constituents(bc_class):\n num_trn = 0\n cn = bc_class.constituent_properties\n if cn.salinity:\n num_trn += 1\n if cn.temperature:\n num_trn += 1\n if cn.vorticity:\n num_trn += 1\n if not cn.general_constituents.empty:\n num_trn += len(cn.general_constituents.index)\n if not cn.sand.empty:\n num_trn += len(cn.sand.index)\n if not cn.clay.empty:\n num_trn += len(cn.clay.index)\n return num_trn", "def hives_count(self) -> int:\n return self.hives.count()", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def count(self) -> int:\n return self._count", "def get_word_count_category(self):\n word_count_category_dict = dict()\n from capstoneproject.models.models.category import Category\n for cat in Category.categories.all():\n word_count_category_dict[cat.name] = dict()\n\n word_count_dict = self._create_word_count_dict()\n for word, count in word_count_dict.items():\n from capstoneproject.models.models.word import Word\n word_model = Word.words.get_word(word=word)\n for word_cat in word_model.get_categories():\n word_count_category_dict[word_cat][word] = count\n\n return word_count_category_dict", "def n_children(self):\n ch = self.children\n return 0 if not ch else len(ch) + sum([c.n_children for c in ch])", "def get_num_classes(self):", "def count(self):\n return self._reduce_for_stat_function(F.count, only_numeric=False)", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def num_carns(self):\n return self._num_carns", "def num_cuts(self) -> Optional[int]:\n raise NotImplementedError(\n 'Sub-classes of CutSampler have to implement self.num_cuts'\n )", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def get_num_carn_landscape(self):\n return len(self.carn_pop)", "def n_cf(self):\n return self._configurations[0].n_cf", "def varCount(self, aKind):\n return self.counts[aKind]", "def uncategorized(df):\n\n counter = 0\n for movie in df.index:\n if len(df.loc[movie, 'imdbGenres']) == 1 and\\\n df.loc[movie, 'Political'] == 0:\n counter += 1\n\n return counter", "def category_number(self):\r\n return conf.lib.clang_getDiagnosticCategory(self)", "def num_classes():\n return NUM_CLASSES", "def count(self):\n return self.get_count()", "def count(self):\n return self.size()", "def nr_labels(self):\n return self.W.shape[1]", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return 
self._classifier.features_dim", "def num_conll(self):\n pass", "def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size", "def get_rand_cat(self):\n return randint(0,GAConfig[\"num_categories\"]-1)", "def count(self):\r\n return self.count_helper(self.top_node)", "def get_num_channels(x):\n return x.get_shape().as_list()[-1]" ]
[ "0.7708821", "0.7072953", "0.6978198", "0.64453894", "0.64185804", "0.6267535", "0.62063515", "0.61772865", "0.6093896", "0.60898095", "0.6053971", "0.6049354", "0.6049354", "0.60457516", "0.6007069", "0.59920245", "0.59676576", "0.5959858", "0.58949566", "0.58928066", "0.5887045", "0.5883174", "0.5866593", "0.5864284", "0.5862431", "0.5856065", "0.5855587", "0.58479726", "0.5843039", "0.58383334", "0.5834697", "0.5834697", "0.5832649", "0.58296996", "0.5826653", "0.5816816", "0.5811656", "0.5802909", "0.58026636", "0.5797458", "0.5789675", "0.57693267", "0.57688004", "0.5765663", "0.5764611", "0.5745968", "0.57170033", "0.5716951", "0.5715019", "0.56972194", "0.5693869", "0.5692093", "0.5679232", "0.5676655", "0.56657326", "0.5660884", "0.564429", "0.5628759", "0.56253064", "0.56220645", "0.5616655", "0.5616618", "0.56056625", "0.5603469", "0.5602465", "0.55948704", "0.55909103", "0.55909103", "0.5589035", "0.5588484", "0.5584381", "0.55841756", "0.5578994", "0.5578994", "0.5578994", "0.55783814", "0.5577777", "0.55759895", "0.557265", "0.55716926", "0.5567615", "0.55612737", "0.5560938", "0.5560938", "0.5560938", "0.5560938", "0.555926", "0.5558303", "0.55537546", "0.5553696", "0.55513036", "0.55474", "0.55468553", "0.5542182", "0.5539082", "0.5536549", "0.5534789", "0.5534218", "0.5533937", "0.55328417", "0.5525973" ]
0.0
-1
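Editorial note (illustration only, not a dataset row): the positive document above is a one-line accessor, return self._num_class. A hedged, self-contained sketch of the same idea, counting non-background categories from a hypothetical label map rather than a detector attribute:

LABEL_MAP = {0: "background", 1: "person", 2: "car", 3: "dog"}  # hypothetical

def num_class(label_map, background_id=0):
    """Number of (non-background) categories."""
    return sum(1 for k in label_map if k != background_id)

print(num_class(LABEL_MAP))  # 3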
Return names of (nonbackground) categories. Returns iterable of str Names of (nonbackground) categories.
def classes(self): return self._classes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def category_names(self):\n return list(self.categories.keys())", "def category_names(self):\n return self._category_names", "def CategoryNames(self):\r\n return sorted(self.getSampleMetadata(self.SampleIds[0]).keys()) \\\r\n if len(self.SampleIds) > 0 else []", "def categories(self):\n cur = self.con.execute('select category from cc');\n return [d[0] for d in cur]", "def categories(self) -> List[str]:\n return self._categories", "def categories(self):\n categories = self.client.classify_text(self.document).categories\n\n result = []\n for category in categories:\n result.append(category.name)\n\n return result", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def get_category_titles(self):\n\n return self.catbrowser.get_category_titles()", "def cat_labels(self):\n try:\n return list(self.cats.columns)\n except AttributeError:\n return []", "def categories(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"categories\")", "def getCategories(self):\n return self.categories.keys()", "def categories(self):\n\t\treturn self._categories", "def categories(self):\n return self.__categories", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def categories(self):\n return self.env.categories", "def categories(self):\n return self._categories", "def categories(self):\n pass", "def list_categories(self) -> List[Tuple[str, str]]:\n category_list = [(name, path) for name, path in self.category_map.items()]\n # Fix the order of category list.\n category_list.sort(key=lambda category: category[0])\n return category_list", "def get_category_classes(self):\n\n return self.catbrowser.get_category_classes()", "def list_categories(self):\n raise NotImplementedError()", "def categories(self):\n return self._data[\"categories\"]", "def categories(self):\r\n return self.q(css='span.rubric-category').text", "def Categories(self):\r\n return self._categories", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def getAllCategories(self):\n return self.categories", "def getCategories(self):\r\n return self.categories", "def show_categories():\n for category in NEWS_CATEGORIES:\n print(category)", "def getCategories(self):\n logger.debug(\"Func: getCategories\")\n\n return self._categories", "def get_names(cat):\n res = []\n while cat:\n res.append(cat.name)\n cat = cat.parent_id\n return res", "def categories(self):\n\t\treturn (sorted(self.dictData.keys()))", "def strip_categories(categories):\n\t\tcategorieslist = []\n\t\tfor category in categories:\n\t\t\t# turns unicode into strings\n\t\t\tcats = (''.join(''.join([cat.encode('UTF8') for cat in category]))).split(', 
')\n\t\t\tcategorieslist.append(catswords)\n\t\treturn categorieslist[1]", "def get_categories(self) -> tuple:\n return self.categories", "def strip_categories(categories):\n\tcategorieslist = []\n\tfor category in categories:\n\t\t# turns unicode into strings\n\t\tcats = (''.join(''.join([cat.encode('UTF8') for cat in category]))).split(', ')\n\t\tcategorieslist.append(catswords)\n\treturn categorieslist[1]", "def get_categories(self):\n cats = []\n for post in self:\n cats.extend(post.Categories)\n return list(sorted(set(cats)))", "def categories(self) -> List[Category]:\n return list(set(self.mapping.values()))", "def categories(self):\n return self.r.smembers(self._categories_key)", "def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)", "def categories_str(self):\n slist = \"\"\n for i, category in enumerate(self.categories):\n if i == 0:\n slist += \"{}\".format(category)\n else:\n slist += \",{}\".format(category)\n return slist", "def category_name(self):\n return self.category.name", "def list_unique_categories(self):\n self._load_all_events()\n categories = set()\n for events in self._events.values():\n if not events:\n continue\n categories.add(events[0].category)\n\n categories = list(categories)\n categories.sort()\n return categories", "def available_categories(self):\n return list(self.landmarks.keys())", "def show_event_categories(self):\n print(\"Catgories: {}\".format(\" \".join(self.list_unique_categories())))", "def categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"categories\")", "def categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"categories\")", "def categories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"categories\")", "def _get_ifunction_categories_list(self):\n category_list = [\"FitFunctions\"]\n func_cats = self.create_mantid_ifunction(self.algorithm_name()).categories()\n for cat in func_cats:\n # double up the category separators so they are not treated as escape characters\n category_list.append(cat.replace(\"\\\\\", \"\\\\\\\\\"))\n\n return category_list", "def subcategories(self):\r\n return [self.decode_string(x) for x in self.extra.get('Subcategories', []) if x]", "def obj_categories(self):\r\n return self._tags", "def list(self):\n return list(sorted(self.manager.data[\"category\"].keys()))", "def getCategories(self):\n categories = set()\n for article in self.articles.values():\n categories.add(article.category)\n return categories", "def all_categories(local):\n\n categories = []\n if local == True:\n categories = list(map((lambda c: c.name), Category.all()))\n else:\n categories = jokes_api.categories()\n\n click.echo(categories)", "def print_categories_list():\n\n categories = []\n for item in data:\n cat = item[\"category\"]\n\n if cat not in categories:\n categories.append(cat)\n\n print(categories) # print the list", "def getCategories(page: wtp._wikitext.WikiText) -> List[str]:\n # Categories are always listed at the bottom of the wikipedia article\n # So the first \"[[Category: ...]]\" you find everything after it will be categories\n s = page.string\n categories = s[s.find(\"[[Category:\"):].split(\"\\n\")\n return categories", "def get_all_names(cls, exclude_values: Iterator['CommonGameTagCategory'] = None) -> Tuple[str]:\n name_list: Tuple[str] = 
tuple([value.name for value in cls.get_all(exclude_values=exclude_values)])\n return name_list", "def _process_categories(categories):\n categories = [c[0].text for c in categories if c and 'graph' in c[0].text]\n for bad_str in ['graph-youtube-', '-container']:\n categories = [c.replace(bad_str, '') for c in categories]\n return categories", "def getBuilderNames(categories=None):", "def get_category_list(cls):\n if Category.__category_list is None:\n Category.__category_list = []\n return Category.__category_list", "def _get_all_categories() -> dict:\n categories = database.fetchall(\"Categories\", \"id\", \"name_ua\")\n return categories", "def get_categories(category_name: str) -> str:\n fixed_name = category_name.lower()\n output = _get_content(fixed_name, \"categories\")\n\n return output", "def extra_super_categories(self):\n return [self.base_category()]", "def categories(self):\n return { category: subcategories.keys() for category, subcategories in self.lib.items()}", "def category_name(self):\n try:\n category = self.proto.category.parent\n return f'{category.name} - {self.proto.category.name}'\n except AttributeError:\n return self.proto.category.name", "def categories(self) -> list:\n if self.primary_category:\n return [\n self.primary_category,\n *self.secondary_categories.exclude(id=self.primary_category.id)\n ]\n\n return [*self.secondary_categories.all()]", "def category(self) -> str:\n return self._search_in_properties(ATTR_CATEGORY)", "def items(self):\n categories = []\n\n for category in Category.objects.filter(level__gt=0):\n categories.append(('shop:product-category', {'store': 'default', 'slug': category.slug}))\n\n return categories", "def report_categories():\n return list(sorted(set([rt.category for rt in report_types()])))", "def collect_english_cats(self):\n tf.logging.info('collecting english categories')\n self.english_cats = list(\n self.frames(filter_english=True, filter_category=True))", "def _get_categories(category_label):\n if not category_label:\n return None\n return map(lambda x: x if x != '$' else None, category_label.split('###'))", "def get_name(self):\n return self.category_name", "def get_categories_from_labels(labels):\n cats = []\n for cat in label_dict:\n for label in labels: \n if label in label_dict[cat]:\n cats.append(cat)\n return cats", "def getNuclideCategories(self):\n if not self._nuclideCategories:\n coolantNuclides = set()\n fuelNuclides = set()\n structureNuclides = set()\n for c in self.iterComponents():\n # get only nuclides with non-zero number density\n # nuclides could be present at 0.0 density just for XS generation\n nuclides = [\n nuc for nuc, dens in c.getNumberDensities().items() if dens > 0.0\n ]\n if c.getName() == \"coolant\":\n coolantNuclides.update(nuclides)\n elif \"fuel\" in c.getName():\n fuelNuclides.update(nuclides)\n else:\n structureNuclides.update(nuclides)\n structureNuclides -= coolantNuclides\n structureNuclides -= fuelNuclides\n remainingNuclides = (\n set(self.parent.blueprints.allNuclidesInProblem)\n - structureNuclides\n - coolantNuclides\n )\n fuelNuclides.update(remainingNuclides)\n self._nuclideCategories[\"coolant\"] = coolantNuclides\n self._nuclideCategories[\"fuel\"] = fuelNuclides\n self._nuclideCategories[\"structure\"] = structureNuclides\n self.summarizeNuclideCategories()\n\n return (\n self._nuclideCategories[\"coolant\"],\n self._nuclideCategories[\"fuel\"],\n self._nuclideCategories[\"structure\"],\n )", "def pretty_cat_list(self):\n return \", \".join([x.name for x in 
self.for_service_cat.all()])", "def get_categories(self) -> list:\n headers_dict = {\n 'user-key': self.user_key.key\n }\n\n endpoint = f'{const.API_HOST}{const.API_SNAPSHOTS_TAXONOMY_BASEPATH}'\n\n response = req.api_send_request(method='GET', endpoint_url=endpoint, headers=headers_dict)\n\n if response.status_code == 200:\n return [entry['attributes']['name'] for entry in response.json()['data']]\n\n raise RuntimeError('API Request returned an unexpected HTTP status')", "def nonterminals(g):\n return g._categories.copy()", "def Categories(self, default=[None]):\n return self.data.get('categories', default)", "def get_target_data_categories(self) -> List[str]:\n return [target.data_category for target in self.targets]", "def list_categories():\n categories = get_categories()\n listing = []\n for title,iurl in sorted(categories.iteritems()):\n list_item = xbmcgui.ListItem(label=title[1:])\n list_item.setArt({'thumb': _icon,\n 'icon': _icon,\n 'fanart': _fanart})\n url = '{0}?action=list_category&category={1}'.format(_url, urllib.quote(iurl))\n is_folder = True\n listing.append((url, list_item, is_folder))\n xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n xbmcplugin.endOfDirectory(_handle)", "def get_categories(self):\n categories = self.session.query(Category).all()\n return categories", "def categories(self):\r\n return categories.ForumCategories(self)", "def vocall_category_info(with_background=True):\n label_map = pascalvoc_label(with_background)\n label_map = sorted(label_map.items(), key=lambda x: x[1])\n cats = [l[0] for l in label_map]\n\n if with_background:\n cats.insert(0, 'background')\n\n clsid2catid = {i: i for i in range(len(cats))}\n catid2name = {i: name for i, name in enumerate(cats)}\n\n return clsid2catid, catid2name", "def test_CategoryNames(self):\n exp = [\"BarcodeSequence\", \"DOB\", \"Description\", \"Treatment\"]\n obs = self.overview_map.CategoryNames\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.CategoryNames\n self.assertEqual(obs, [])\n\n obs = self.empty_map.CategoryNames\n self.assertEqual(obs, [])", "def category_name(self):\r\n return conf.lib.clang_getDiagnosticCategoryName(self.category_number)", "def get_categories(self):\r\n return self.ancestors.filter(category=True)", "def getCategory():", "def test_CategoryNames(self):\r\n exp = [\"BarcodeSequence\", \"DOB\", \"Description\", \"Treatment\"]\r\n obs = self.overview_map.CategoryNames\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.no_metadata.CategoryNames\r\n self.assertEqual(obs, [])\r\n\r\n obs = self.empty_map.CategoryNames\r\n self.assertEqual(obs, [])", "def get_naked_names(graph: BELGraph) -> Set[str]:\n return set(_naked_names_iter(graph))", "def get(self):\n \n categories = db.categories.Category.find()\n return list(categories)", "def get_category_name(self):\n return self._ctx.get(\"name\", self._ctx[\"id\"])", "def avail_categories(self):\n # retrieve categories\n categories = self.show_all_categories()\n # for each category, retrieve packages\n output = {}\n for category in categories:\n packages = self.show_category(category)\n output[category] = packages\n\n return output", "def list_valid_tags(self):\n tags = Tag.objects.filter(db_category=\"rp hooks\").order_by(\"db_key\")\n self.msg(\"Categories: %s\" % \"; \".join(tag.db_key for tag in tags))\n return", "def list_labels(self):\n # Create empty list\n label_names = []\n \n # For every name in training directory\n for name in os.listdir(self.train_data):\n # If it does not start with . 
(which hidden files do)\n if not name.startswith('.'):\n label_names.append(name)\n \n return label_names", "def _get_algorithm_categories_list(self):\n category_list = [\"Algorithms\"]\n alg_cats = self.create_mantid_algorithm(self.algorithm_name(), self.algorithm_version()).categories()\n for cat in alg_cats:\n # double up the category separators so they are not treated as escape characters\n category_list.append(cat.replace(\"\\\\\", \"\\\\\\\\\"))\n\n return category_list", "def get_categories(mapping):\n categories = []\n \n for idx, name in mapping.items(): \n temp = {'id':idx, 'name':name, 'supercategory':'NA'}\n categories.append(temp)\n \n return categories", "def event_categories(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"event_categories\")", "def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories", "def get_category_name(self, user, name):\n user_categories = []\n for cat in self.__cat:\n if cat['name'] == name and cat['created_by'] == user:\n user_categories.append(cat)\n return user_categories", "def get_categories(self, pack=None):\n # Validate Pack name.\n pack = pack or \"Parts\"\n # Get the associated model path.\n search_path = self.get_model_path_from_pack(pack)\n return os.listdir(search_path)", "def get_categories(title: str) -> []:\n\n categories = []\n\n if any(word in title for word in [\"Agenda\", \"agenda\"]):\n categories.append(\"Agendas\")\n if any(word in title for word in [\"Charter\", \"charter\"]):\n categories.append(\"Charters\")\n if any(word in title for word in [\"Flyer\", \"flyer\"]):\n categories.append(\"Flyers\")\n if any(word in title for word in [\"Guidance\", \"guidance\", \"Guide\", \"guide\"]):\n categories.append(\"Guides\")\n if any(word in title for word in [\"Interview\", \"interview\"]):\n categories.append(\"Interviews\")\n if any(word in title for word in [\"Letter\", \"letter\"]):\n if any(word in title for word in [\"Newsletter\", \"newsletter\"]):\n categories.append(\"Newsletters\")\n else:\n categories.append(\"Letters\")\n if any(word in title for word in [\"Memo\", \"memo\"]):\n categories.append(\"Memos\")\n if any(word in title for word in [\"Minutes\", \"minutes\"]):\n categories.append(\"Minutes\")\n if any(word in title for word in [\"Plan\", \"plan\"]):\n categories.append(\"Plans\")\n if any(word in title for word in [\"Table\", \"table\"]):\n categories.append(\"Tables\")\n if any(word in title for word in [\"Transition\", \"transition\"]):\n categories.append(\"Transition papers\")\n\n return categories", "def get_bad_asset_category_ids():\n return app_config.ESI_BAD_ASSET_CATEGORIES", "def name(self) -> str:\n return str(self.category.value)" ]
[ "0.79573333", "0.7846303", "0.73073953", "0.7111364", "0.7110502", "0.70963407", "0.70941347", "0.7074836", "0.6971838", "0.6940362", "0.6827828", "0.68070495", "0.67111456", "0.66973966", "0.6610056", "0.6607805", "0.65921", "0.65612626", "0.6545443", "0.653917", "0.65071493", "0.64744717", "0.6456105", "0.64440286", "0.6442781", "0.6424875", "0.6418396", "0.6404595", "0.6355214", "0.6334723", "0.6328441", "0.6326757", "0.6317515", "0.63142115", "0.6308655", "0.6272027", "0.62698853", "0.6249224", "0.62071186", "0.6190479", "0.6187785", "0.61820817", "0.61708593", "0.6138386", "0.6138386", "0.6138386", "0.61336637", "0.6128702", "0.6083581", "0.60791695", "0.60774535", "0.6066819", "0.60596067", "0.6048591", "0.60422826", "0.6040911", "0.6031688", "0.6014526", "0.60093236", "0.59960526", "0.5991145", "0.5986578", "0.59686226", "0.5966885", "0.5948247", "0.5940512", "0.593492", "0.5916649", "0.5904723", "0.588098", "0.5858787", "0.5854421", "0.58221537", "0.5812217", "0.58052033", "0.5805014", "0.5801981", "0.5800775", "0.58006793", "0.579051", "0.57827276", "0.57817256", "0.57773185", "0.5767759", "0.57663167", "0.5766014", "0.5763295", "0.57477045", "0.5746733", "0.5741034", "0.57175136", "0.5712524", "0.5707321", "0.57008827", "0.56994784", "0.5692087", "0.5686839", "0.5686086", "0.5686082", "0.5680722", "0.567299" ]
0.0
-1
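Editorial note (illustration only, not a dataset row): this record is the companion of the previous one -- it asks for the names of the non-background categories rather than their count. Reusing the same hypothetical label map:

LABEL_MAP = {0: "background", 1: "person", 2: "car", 3: "dog"}  # hypothetical

def classes(label_map, background_id=0):
    """Names of (non-background) categories, in id order."""
    return [name for k, name in sorted(label_map.items()) if k != background_id]

print(classes(LABEL_MAP))       # ['person', 'car', 'dog']
print(len(classes(LABEL_MAP)))  # 3, consistent with num_class in the previous record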
YOLOV3 network hybrid forward.
def hybrid_forward(self, F, x, *args): all_box_centers = [] all_box_scales = [] all_objectness = [] all_class_pred = [] all_anchors = [] all_offsets = [] all_feat_maps = [] all_detections = [] routes = [] for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs): x = stage(x) routes.append(x) # the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs): x, tip = block(x) if autograd.is_training(): dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip) all_box_centers.append(box_centers.reshape((0, -3, -1))) all_box_scales.append(box_scales.reshape((0, -3, -1))) all_objectness.append(objness.reshape((0, -3, -1))) all_class_pred.append(class_pred.reshape((0, -3, -1))) all_anchors.append(anchors) all_offsets.append(offsets) # here we use fake featmap to reduce memory consuption, only shape[2, 3] is used fake_featmap = F.zeros_like(tip.slice_axis( axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1)) all_feat_maps.append(fake_featmap) else: dets = output(tip) all_detections.append(dets) if i >= len(routes) - 1: break # add transition layers x = self.transitions[i](x) # upsample feature map reverse to shallow layers upsample = _upsample(x, stride=2) route_now = routes[::-1][i + 1] x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1) if autograd.is_training(): # during training, the network behaves differently since we don't need detection results if autograd.is_recording(): # generate losses and return them directly box_preds = F.concat(*all_detections, dim=1) all_preds = [F.concat(*p, dim=1) for p in [ all_objectness, all_box_centers, all_box_scales, all_class_pred]] all_targets = self._target_generator(box_preds, *args) return self._loss(*(all_preds + all_targets)) # return raw predictions, this is only used in DataLoader transform function. return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps, F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1), F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1)) # concat all detection results from different stages result = F.concat(*all_detections, dim=1) # apply nms per class if self.nms_thresh > 0 and self.nms_thresh < 1: result = F.contrib.box_nms( result, overlap_thresh=self.nms_thresh, valid_thresh=0.01, topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False) if self.post_nms > 0: result = result.slice_axis(axis=1, begin=0, end=self.post_nms) ids = result.slice_axis(axis=-1, begin=0, end=1) scores = result.slice_axis(axis=-1, begin=1, end=2) bboxes = result.slice_axis(axis=-1, begin=2, end=None) return ids, scores, bboxes
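Editorial note (illustration only, not part of the record): the hybrid_forward above walks the detection heads from the deepest stage back to the shallowest, upsampling each intermediate map by a factor of 2 and concatenating it with the next cached route before the following head. Below is a toy NumPy sketch of just that routing pattern, ignoring the YOLO blocks, transition convs, loss computation and NMS; shapes and channel counts are invented.

import numpy as np

def upsample2x(x):
    # nearest-neighbour upsample of a (C, H, W) array, standing in for _upsample(..., stride=2)
    return x.repeat(2, axis=1).repeat(2, axis=2)

# Hypothetical feature maps from shallow to deep, playing the role of `routes`.
routes = [np.zeros((32, 52, 52)), np.zeros((64, 26, 26)), np.zeros((128, 13, 13))]

x = routes[-1]  # start from the deepest stage, as the loop above does
for shallower in reversed(routes[:-1]):
    up = upsample2x(x)[:, :shallower.shape[1], :shallower.shape[2]]  # crop, like F.slice_like
    x = np.concatenate([up, shallower], axis=0)  # channel-wise concat, like F.concat(..., dim=1)
    print(x.shape)  # (192, 26, 26), then (224, 52, 52)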
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n 
loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def forward(self, X, batch_size):\n\n z = self.neural_net_forward(X.view(-1, self.n_hos * self.n_types)) # [batch_size, n_structures]\n\n x_1 = self.linear_program_forward(X, z, batch_size)\n\n return x_1", "def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n 
pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n if self.phase == 'test':\n # 测试时, 仅使用shot2 的输出\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## 训练时,使用shot1 和 shot2 的输出\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output", "def L_model_forward(X, parameters):\n pass", "def forward(network: dict, x: np.array) -> np.array:\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n z1 = _forward(x, W1, b1, 'sigmoid')\n z2 = _forward(z1, W2, b2, 'sigmoid')\n y = _forward(z2, W3, b3, 'identity')\n return y", "def hybrid_forward(self, F, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out = F.Activation(out + identity, act_type='relu')\n\n if self.nonlocal_block is not None:\n out = self.nonlocal_block(out)\n return out", "def forward(self, x):\n return self.net(x)", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def _forward(self, X):\n firstLayer = True\n for layer, fcn in self.model.named_children():\n if 'recurrent' in layer:\n if firstLayer:\n Y, hidden = fcn(X)\n else:\n Y, hidden = fcn(Y)\n elif 'dropout' in layer:\n Y = fcn(Y)\n elif 'linear' in layer:\n Y = fcn(Y.view((Y.shape[1], Y.shape[0]*Y.shape[-1])))\n else:\n Y = fcn(Y)\n\n firstLayer = False\n\n return Y", "def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x", "def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n 
kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = 
maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n architecture['layer{}'.format(layer+1)][1] = output\n if layer == len(architecture) - 1:\n y_pred = 
architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred", "def forward(self, x):\r\n # Is it possible to calculate the acc and gyro convolution in parallel??? TO DO\r\n # split x\r\n x_split = torch.split(x, 8, dim=3)\r\n # acc\r\n acc_out = F.relu(self.acc_bn1(self.acc_conv1(x_split[0])))\r\n acc_out = F.relu(self.acc_bn2(self.acc_conv2(acc_out)))\r\n # gyro\r\n gyro_out = F.relu(self.gyro_bn1(self.gyro_conv1(x_split[1])))\r\n gyro_out = F.relu(self.gyro_bn2(self.gyro_conv2(gyro_out)))\r\n\r\n sensor_data = torch.cat([acc_out, gyro_out], 3)\r\n out = F.relu(self.bn3(self.conv3(sensor_data)))\r\n return out", "def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5", "def forward(self, x, y=None):\n # propagate networks\n self.prior_latent_distribution = self.prior(x)\n self.unet_features = self.unet.forward(x)\n if y is not None:\n y_onehot = F.one_hot(\n y[:, 0], num_classes=self.num_classes).permute(0, -1, 1, 2)\n xy = torch.cat([x, y_onehot], dim=1)\n self.posterior_latent_distribution = self.posterior(xy)\n\n # sample latent\n if y is not None:\n self.z = self.posterior_latent_distribution.rsample()\n else:\n self.z = self.prior_latent_distribution.sample()\n\n # reconstruct image\n self.y_hat_raw = self.fcomb(self.unet_features, self.z)\n\n return self.y_hat_raw", "def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)", "def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply 
extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources", "def forward(self, x):\n\n\t\t## Conv layers\n\t\tx = self.avgpool(F.tanh(self.conv1(x)))\n\t\tx = self.avgpool(F.tanh(self.conv2(x)))\n\t\tx = F.tanh(self.conv3(x))\n\n\t\t## Flatten\n\t\tx = x.view(x.size(0), -1)\n\n\t\t## Fully connected layers\n\t\tx = F.tanh(self.fc1(x))\n\t\tx = self.fc2(x)\n\n\t\tx = F.softmax(x, dim=1)\n\n\t\treturn x", "def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output", "def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = 
self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x", "def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time", "def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur", "def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out", "def forward_propagate(self, X=[]):\n A = np.zeros((len(self.architecture)), dtype=object)\n if np.size(X) > 0:\n A[0] = X\n else:\n A[0] = self.input\n\n self.all_data[f'A0'] = A[0]\n \n for layer, activation_function in zip(range(1, len(self.architecture)),self.activations):\n Z = (A[layer-1].dot(self.weights_and_biases[f'W{layer}']) + self.weights_and_biases[f'b{layer}'])\n activation_function = self.activations[layer-1]\n A[layer] = self.activation(Z,type=activation_function)\n self.all_data[f'Z{layer}'] = Z\n self.all_data[f'A{layer}'] = A[layer]\n \n y_predicted = A[layer]\n \n return y_predicted", "def forward(self, data):\n b, _, _ = data.size()\n # encode process\n skip1 = data\n out1 = self.conv1(data)\n\n 
out1 = self.relu(out1)\n out1 = self.pool1(out1)\n out1 = self.dropout(out1)\n skip2 = out1\n out1 = self.conv2(out1)\n out1 = self.relu(out1)\n out1 = self.pool2(out1)\n out1 = self.dropout(out1)\n skip3 = out1\n out1 = self.conv3(out1)\n out1 = self.relu(out1)\n out1 = self.pool3(out1)\n out1 = self.dropout(out1)\n skip4 = out1\n up5 = self.aap(out1)\n # decode process\n up4 = upsample(up5, skip4.size()[-1])\n up4 = up4 + skip4\n up4 = self.blend4(up4)\n up3 = upsample(up4, skip3.size()[-1])\n up3 = up3 + skip3\n up3 = self.blend3(up3)\n up2 = upsample(up3, skip2.size()[-1])\n up2 = up2 + skip2\n up2 = self.blend2(up2)\n up1 = upsample(up2, skip1.size()[-1])\n up1 = up1 + skip1\n up1 = self.blend1(up1)\n out_dense = self.sigmoid(up1)\n out_dense = out_dense.view(b, -1)\n\n return out_dense", "def forward(self, x):\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return y1, y2", "def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x", "def forward(self, x):\n sources = list()\n tcb_source = list()\n odm_loc = list()\n odm_conf = list()\n if self.phase == 'test':\n feat_sizes = list()\n\n # apply vgg up to conv4_3 relu and conv5_3 relu\n for k in range(self.conv5_3_layer):\n x = self.vgg[k](x)\n if self.size != 512 and self.size != 320 and self.conv3_3_layer - 1 == k:\n s = self.conv3_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if self.conv4_3_layer - 1 == k:\n s = self.conv4_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n elif self.conv5_3_layer - 1 == k:\n s = self.conv5_3_L2Norm(x)\n sources.append(s)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply vgg up to fc7\n for k in range(self.conv5_3_layer, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # apply extra layers and cache source layer outputs\n for k in range(len(self.extras)):\n x = self.extras[k](x)\n if self.extra_1_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n if (self.size == 640 or self.size == 5126) and self.extra_2_layer - 1 == k:\n sources.append(x)\n if self.phase == 'test':\n feat_sizes.append(x.shape[2:])\n\n # calculate TCB features\n p = None\n for k, v in enumerate(sources[::-1]):\n s = v\n for i in range(3):\n s = self.tcb0[(self.step-k)*3 + i](s)\n if k != 0:\n u = p\n u = self.tcb1[self.step-k](u)\n s += u\n for i in range(3):\n s = self.tcb2[(self.step-k)*3 + i](s)\n p = s\n tcb_source.append(s)\n tcb_source.reverse()\n\n # apply ODM to source layers\n for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):\n odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)\n odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)\n\n if self.phase == \"test\":\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds\n self.softmax(odm_conf.view(odm_conf.size(0), 
-1,\n self.num_classes)), # odm conf preds\n feat_sizes\n )\n else:\n output = (\n odm_loc.view(odm_loc.size(0), -1, 4),\n odm_conf.view(odm_conf.size(0), -1, self.num_classes),\n )\n return output", "def forward(self, Xo):\n N = Xo.size()[0]\n # assert Xo.size() == (N, 3, 448, 448)\n X = self.features(Xo)\n # assert X.size() == (N, 128, 112, 112)\n Xp = nn.MaxPool2d(kernel_size=4, stride=4)(X)\n # Xp = F.adaptive_avg_pool2d(X, (1, 1))\n # assert Xp.size() == (N, 128, 28, 28)\n Xp = Xp.view(-1, 128*28*28 )\n # 3 way, get attention mask\n X1 = self.fc1(Xp)\n X2 = self.fc2(Xp)\n X3 = self.fc3(Xp)\n # X1 = F.relu(self.fc1_(Xp))\n # X2 = F.relu(self.fc2_(Xp))\n # X3 = F.relu(self.fc3_(Xp))\n # X1 = self.fc1(X1)\n # X2 = self.fc2(X2)\n # X3 = self.fc3(X3)\n # multiple mask elementwisely, get 3 attention part\n X1 = X1.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X2 = X2.unsqueeze(dim=2).unsqueeze(dim=3) * X\n X3 = X3.unsqueeze(dim=2).unsqueeze(dim=3) * X\n #get the graduate w.r.t input image and multiple, then X1 become N*3*448*448\n X1=self.weightByGrad(X1,Xo)\n X2=self.weightByGrad(X2,Xo)\n X3=self.weightByGrad(X3,Xo)\n # use stn to crop, size become (N,3,96,96)\n # X1 = self.stn(X1, 0)\n # X2 = self.stn(X2, 1)\n # X3 = self.stn(X3, 2)\n #3 BCNN 3 size==(N,200)\n X1=self.BCNN_N(X1,self.bcnnConv_1,self.bfc1)\n X2=self.BCNN_N(X2,self.bcnnConv_2,self.bfc2)\n X3=self.BCNN_N(X3,self.bcnnConv_3,self.bfc3)\n #sum them up, for the predict max\n res=X1+X2+X3\n\n return res", "def forward(self, data):\n b, _, _ = data.size()\n # encode process\n skip1 = data\n out1 = self.conv1(data)\n out1 = self.relu(out1)\n out1 = self.pool1(out1)\n out1 = self.dropout(out1)\n skip2 = out1\n out1 = self.conv2(out1)\n out1 = self.relu(out1)\n out1 = self.pool2(out1)\n out1 = self.dropout(out1)\n skip3 = out1\n out1 = self.conv3(out1)\n out1 = self.relu(out1)\n out1 = self.pool3(out1)\n out1 = self.dropout(out1)\n skip4 = out1\n up5 = self.aap(out1)\n # decode process\n up4 = upsample(up5, skip4.size()[-1])\n up4 = up4 + skip4\n up4 = self.blend4(up4)\n up3 = upsample(up4, skip3.size()[-1])\n up3 = up3 + skip3\n up3 = self.blend3(up3)\n up2 = upsample(up3, skip2.size()[-1])\n up2 = up2 + skip2\n up2 = self.blend2(up2)\n up1 = upsample(up2, skip1.size()[-1])\n up1 = up1 + skip1\n up1 = self.blend1(up1)\n out_dense = self.sigmoid(up1)\n out_dense = out_dense.view(b, -1)\n\n return out_dense", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n # pylint: disable=arguments-differ\n x = x.permute([0, 3, 1, 2, ])\n x = self.power(x * 0.00392156885937 + 0.0, 1.0)\n cat = self.conv1(x)\n x = self.conv2(cat)\n x = self.concat8((cat, x), 1)\n conv_final = self.conv3(x)\n conv_final_permute = conv_final.permute([0, 2, 3, 1])\n loc_pred, obj_pred, cls_pred = torch.split(conv_final_permute, [64, 16, 64], 3)\n # obj_perm = self.obj_pred(obj_perm) # sigmoid might lead to gradient losing.\n #cls_reshape = torch.reshape(cls_perm, (-1, 4))\n #cls_pred_prob = self.cls_pred_prob(cls_reshape)\n #cls_pred = torch.reshape(cls_pred_prob, cls_perm.shape)\n return obj_pred, cls_pred, loc_pred", "def forward(self, x):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\toutput = self._layers[0].forward(x)\n\t\tfor i in range(1, len(self._layers)):\n\t\t\toutput = self._layers[i].forward(output)\n\t\treturn 
output\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, input):\n\n return self.network(input)", "def forward(self, batch):\n # Apply first convolution, followed by ReLU non-linearity; \n # use batch-normalization on its outputs\n batch = func.relu(self.conv1(self.conv1_normed(batch)))\n batch = func.relu(self.one1(self.one1_normed(batch)))\n \n # Apply conv2 and conv3 similarly\n batch = func.relu(self.conv2(self.conv2_normed(batch)))\n batch = func.relu(self.one2(self.one2_normed(batch)))\n batch = func.relu(self.conv3(self.conv3_normed(batch)))\n batch = func.relu(self.one3(self.one3_normed(batch)))\n \n \n # Pass the output of conv3 to the pooling layer\n batch = self.pool(batch)\n\n # Reshape the output of the conv3 to pass to fully-connected layer\n batch = batch.view(-1, self.num_flat_features(batch))\n \n # Connect the reshaped features of the pooled conv3 to fc1\n batch = func.relu(self.fc1(batch))\n \n # Connect fc1 to fc2 - this layer is slightly different than the rest (why?)\n batch = self.fc2(batch)\n\n\n # Return the class predictions\n #TODO: apply an activition function to 'batch'\n return func.sigmoid(batch)", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def forward(self, x):\n x = self.conv1(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x)\n x = self.conv2(x)\n if self.use_bn:\n x = self.batchnorm(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = self.activation(x) \n x = self.maxpool(x) \n return x", "def model_forward_pass(self, data):\n for key, value in data.items():\n data[key] = value.to(self.device)\n \n if self.fp16:\n with torch.cuda.amp.autocast():\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n else:\n output, loss = self.model(**data)\n loss = loss / self.accumulate_grad_steps\n\n return output, loss", "def forward(self, data):\n b, _, _ = data.size()\n # encode process\n skip1 = data\n out1 = self.conv1(data)\n out1 = self.relu(out1)\n out1 = self.pool1(out1)\n out1 = self.dropout(out1)\n skip2 = out1\n out1 = self.conv2(out1)\n out1 = self.relu(out1)\n out1 = self.pool2(out1)\n out1 = self.dropout(out1)\n skip3 = out1\n out1 = self.conv3(out1)\n out1 = self.relu(out1)\n out1 = self.pool3(out1)\n out1 = self.dropout(out1)\n skip4 = out1\n # decode process\n up3 = upsample(skip4, skip3.size()[-1])\n up3 = up3 + skip3\n up3 = self.blend3(up3)\n up2 = upsample(up3, skip2.size()[-1])\n up2 = up2 + skip2\n up2 = self.blend2(up2)\n up1 = upsample(up2, skip1.size()[-1])\n up1 = up1 + skip1\n up1 = self.blend1(up1)\n out_dense = self.sigmoid(up1)\n out_dense = out_dense.view(b, -1)\n\n return out_dense", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = 
F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def forward(self, x):\n self.save_net()\n self.perturb_tensors()\n out = self.net.forward(x)\n return out", "def forward(self, inputs):\n #print(\"w1 shape\", self.w1.shape)\n z1 = np.dot(inputs, self.w1)\n self.a1 = sigmoid(z1)\n \n z2 = np.dot(self.a1, self.w2)\n self.a2 = sigmoid(z2)\n \n z3 = np.dot(self.a2, self.w3)\n self.y = sigmoid(z3)\n \n return self.y", "def forward(self, x):\n out_conv1 = self.conv1(x)\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3(out_conv2)\n out_conv4 = self.conv4(out_conv3)\n out_conv5 = self.conv5(out_conv4)\n out_conv6 = self.conv6(out_conv5)\n out_conv7 = self.conv7(out_conv6)\n\n out_upconv7 = self.crop_top_left(self.upconv7(out_conv7), out_conv6)\n concat7 = torch.cat((out_upconv7, out_conv6), 1)\n out_iconv7 = self.iconv7(concat7)\n\n out_upconv6 = self.crop_top_left(self.upconv6(out_iconv7), out_conv5)\n concat6 = torch.cat((out_upconv6, out_conv5), 1)\n out_iconv6 = self.iconv6(concat6)\n\n out_upconv5 = self.crop_top_left(self.upconv5(out_iconv6), out_conv4)\n concat5 = torch.cat((out_upconv5, out_conv4), 1)\n out_iconv5 = self.iconv5(concat5)\n\n out_upconv4 = self.crop_top_left(self.upconv4(out_iconv5), out_conv3)\n concat4 = torch.cat((out_upconv4, out_conv3), 1)\n out_iconv4 = self.iconv4(concat4)\n disp4 = self.alpha * self.predict_disp4(out_iconv4) + self.beta\n\n out_upconv3 = self.crop_top_left(self.upconv3(out_iconv4), out_conv2)\n disp4_up = self.crop_top_left(torch.nn.functional.interpolate(disp4,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv2)\n concat3 = torch.cat((out_upconv3, out_conv2, disp4_up), 1)\n out_iconv3 = self.iconv3(concat3)\n disp3 = self.alpha * self.predict_disp3(out_iconv3) + self.beta\n\n out_upconv2 = self.crop_top_left(self.upconv2(out_iconv3), out_conv1)\n disp3_up = self.crop_top_left(torch.nn.functional.interpolate(disp3,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), out_conv1)\n concat2 = torch.cat((out_upconv2, out_conv1, disp3_up), 1)\n out_iconv2 = self.iconv2(concat2)\n disp2 = self.alpha * self.predict_disp2(out_iconv2) + self.beta\n\n out_upconv1 = self.crop_top_left(self.upconv1(out_iconv2), x)\n disp2_up = self.crop_top_left(torch.nn.functional.interpolate(disp2,\n scale_factor=2,\n mode='bilinear',\n align_corners=False), x)\n concat1 = torch.cat((out_upconv1, disp2_up), 1)\n out_iconv1 = self.iconv1(concat1)\n disp1 = self.alpha * self.predict_disp1(out_iconv1) + self.beta\n\n if self.training:\n return disp1, disp2\n else:\n return disp1", "def forward(self, x):\n x = self._activation(self.fully_connected_1(x))\n x = self._activation(self.fully_connected_2(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_3(x))\n x = self._activation(self.fully_connected_4(x))\n x = self.dropout(x)\n x = self._activation(self.fully_connected_5(x))\n return self.fully_connected_out(x)", "def forward_original(self, x):\n # Shared Encoder.\n x = self.relu(self.conv1a(x))\n x = 
self.relu(self.conv1b(x))\n x = self.pool(x)\n x = self.relu(self.conv2a(x))\n x = self.relu(self.conv2b(x))\n x = self.pool(x)\n x = self.relu(self.conv3a(x))\n x = self.relu(self.conv3b(x))\n x = self.pool(x)\n x = self.relu(self.conv4a(x))\n x = self.relu(self.conv4b(x))\n # Detector Head.\n cPa = self.relu(self.convPa(x))\n semi = self.convPb(cPa)\n\n # Descriptor Head.\n cDa = self.relu(self.convDa(x))\n desc = self.convDb(cDa)\n dn = torch.norm(desc, p=2, dim=1) # Compute the norm.\n desc = desc.div(torch.unsqueeze(dn, 1)) # Divide by norm to normalize.\n return semi, desc", "def setup_forward(self, W, input_data, prefix=\"\"):\n \n def loop_body(i, activations, outputcollect):\n \n if self.config['sequence_input']:\n # Cut out the correct input\n if self.config['net_input_add_onehot']:\n inp = tf.slice(input_data, (0,i), (self.config['batch_size'], 1), name=prefix+\"/inputSlice\") # <batch_size, 1>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size>\n inp = tf.one_hot(indices=inp, depth=self.config['num_input']) # <batch_size, num_input>\n else:\n inp = tf.slice(input_data, (0,i,0), (self.config['batch_size'], 1, self.config['num_input']), name=prefix+\"/inputSlice\") # <batch_size, 1, num_input>\n inp = tf.squeeze(inp, 1, name=prefix+\"/inputSqueeze\") # <batch_size, num_input>\n else:\n inp = input_data\n inp = self.setup_print(inp, \"input data\")\n \n # Concatenate input, bias, activations\n inp = tf.concat([inp, self.bias, activations], axis=1, name=prefix+\"/stepconcat\") # <batch_size, from>\n inp = tf.expand_dims(inp, 1) # <batch_size, 1, from>\n \n # Fully connected\n # <batch_size, 1, to> <= <batch_size, 1, from> @ <batch_size, from, to>\n activations = tf.matmul(inp, W, name=prefix+\"/stepmatmul\")\n activations = tf.squeeze(activations, 1) # <batch_size, to>\n \n # Leaky ReLU\n # This allows values to blow up\n ## activations = tf.maximum(activations, activations * .3, name=prefix+\"/lrelu\")\n \n # Sigmoid\n activations = tf.sigmoid(activations) # <batch_size, to>\n \n # Store the output if we need outputs from all timesteps\n # Alternative may be: https://stackoverflow.com/questions/39157723/how-to-do-slice-assignment-in-tensorflow/43139565#43139565\n if self.config['sequence_output']:\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n name=prefix+\"/outputslice\"\n )\n output = tf.expand_dims(output, axis=1) # <batch_size, 1, output>\n outputcollect = tf.concat([outputcollect, output], axis=1)\n \n return tf.add(i,1), activations, outputcollect\n \n loop_out = tf.while_loop(\n cond=(lambda\n i, \n activations,\n outputcollect:\n tf.less(i, self.config['timesteps'])\n ),\n body=loop_body,\n loop_vars=[\n self.initial_i,\n self.initial_activations,\n self.initial_output\n ],\n shape_invariants=[\n self.initial_i.get_shape(),\n self.initial_activations.get_shape(),\n tf.TensorShape([self.config['batch_size'], None, self.config['num_output']])\n ],\n back_prop=False,\n # return_same_structure=True,\n name=prefix+\"/loop\"\n )\n \n # Get the output\n if self.config['sequence_output']:\n output = loop_out[2]\n # Set shape otherwise broadcasting messes this up\n output.set_shape((self.config['batch_size'], self.config['timesteps'], self.config['num_output']))\n else:\n activations = loop_out[1] # <batch_size, to>\n output = tf.slice( # -> <batch_size, output>\n activations, \n (0,0), \n (self.config['batch_size'], self.config['num_output']), \n 
name=prefix+\"/outputslice\"\n )\n\n if self.config['net_add_softmax']:\n # tf.nn.softmax\n output = tf.exp(output) / tf.expand_dims(tf.reduce_sum(tf.exp(output), axis=-1), axis=-1)\n \n return output", "def forward(self, x):\n # sources保存特征图,loc与conf保存所有PriorBox的位置与类别预测特征\n sources = list()\n loc = list()\n conf = list()\n\n # 对输入图像卷积到conv4_3,将特征添加到sources中\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # 继续卷积到conv7,将特征添加到sources中\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # 继续利用额外的卷积层计算,并将特征添加到sources中\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1: # 间隔一层\n sources.append(x)\n\n # 对sources中的特征图利用类别与位置网络进行卷积计算,并保存到loc与conf中\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n # 对于训练来说,output包括了loc与conf的预测值以及PriorBox的信息\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output", "def forward(self, x):\n previous_batch, current_batch = x\n previous_batch_pc, previous_batch_f = previous_batch[0], previous_batch[1]\n current_batch_pc, current_batch_f = current_batch[0], current_batch[1]\n\n f1 = previous_batch_pc[:, :, 3:]\n pc1 = previous_batch_pc[:, :, :3]\n\n f2 = current_batch_pc[:, :, 3:]\n pc2 = current_batch_pc[:, :, :3]\n\n batch_size, n_points_prev, _ = previous_batch_pc.shape\n batch_size, n_points_cur, _ = current_batch_pc.shape\n\n # All outputs of the following layers are tuples of (pos, features)\n # --- Point Feature Part ---\n pf_prev_1, pf_prev_2, pf_prev_3 = self._point_feature_net(pc1.float(), f1.float())\n pf_curr_1, pf_curr_2, pf_curr_3 = self._point_feature_net(pc2.float(), f2.float())\n\n # --- Flow Embedding / Point Mixture Part ---\n _, fe_2, fe_3 = self._point_mixture(x1=pf_prev_3, x2=pf_curr_3)\n\n # --- Flow Refinement Part ---\n x = self._flow_refinement(pf_curr_1=pf_curr_1, pf_curr_2=pf_curr_2, pf_curr_3=pf_curr_3, fe_2=fe_2, fe_3=fe_3)\n\n # --- Final fully connected layer ---\n pos, features = x\n features = features.transpose(1, 2)\n x = self._fc(features)\n return x", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward(self, 
img):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n H, W = img.size()[2], img.size()[3]\n #print('x',x)\n #print('x.shape',x.shape) ## 32 x 3 x 96 x 128\n z32 = self.start(img)\n z64 = self.layer1(z32) + self.layer1_ds(z32)\n #print('z1',z64.shape)\n z128 = self.layer2(z64) + self.layer2_ds(z64)\n #print('z2',z128.shape)\n z256 = self.layer3(z128) + self.layer3_ds(z128)\n #print('z3',z256.shape)\n z256d = self.drop_out_layer(z256)\n #print('z_drop',z256d.shape)\n z256u = self.layer4(z256d)\n #print('z4',z256u.shape)\n z128u = self.layer5(torch.cat((z256u, F.interpolate(z256d,size=z256u.size()[2:] )), 1))\n #print('z5',z128u.shape)\n z64u = self.layer6(torch.cat((z128u, F.interpolate(z128,size=z128u.size()[2:] )), 1))\n #print('z6',z64u.shape)\n\n z32u = self.final(torch.cat((z64u, F.interpolate(z64,size=z64u.size()[2:] )), 1))\n #print('z6_plus',z32u.shape)\n\n #print('z7_result',self.classifer(z32u)[:, :, :H, :W].shape)\n result_class = self.classifer(z32u)[:, :, :H, :W]\n\n #print('model result shape',result_class.shape)\n ## 16 x 1 x 300 x 400\n\n # using soft argmax\n spa_argmax = spatial_argmax(torch.squeeze(result_class,1))\n\n #one hot with spatial argmax\n #xy_val = torch.zeros(spa_argmax.shape).float()\n #for idx, pt in enumerate(spa_argmax):\n # x_val = (pt[0]+1.0)*63.5\n # y_val = (pt[1]+1.0)*47.5\n # # for each batch. [0...127][0...95]\n # xy_val[idx][0] = x_val\n # xy_val[idx][1] = y_val\n\n xy_val = (spa_argmax+1.0).to(device)\n #print('spa_argmax',spa_argmax)\n scaling_factor = torch.FloatTensor([[(W-1)/2,0.],[0.,(H-1)/2]]).to(device)\n #scaling_factor = torch.FloatTensor([[63.5,0.],[0.,44.5]]).to(device)\n xy_val = xy_val.mm(scaling_factor)\n\n return xy_val", "def forward(self, input):\n input, _ = input\n bs = input.shape[0]\n d1 = self.relu1(self.fc1(input))\n d2 = self.relu2(self.fc2(d1))\n d3 = self.fc3(d2)\n out = self.sigmoid(d3)\n\n out = out.view(bs, 17, 3)\n return out", "def forward_train(self, img, img_metas, **kwargs):\n labels = {}\n labels['trans_inv'] = kwargs['trans_inv']\n labels['intrinsic_param'] = kwargs['intrinsic_param']\n labels['joint_root'] = kwargs['joint_root']\n labels['depth_factor'] = kwargs['depth_factor']\n labels['target_uvd_29'] = kwargs['target_uvd_29']\n labels['target_xyz_24'] = kwargs['target_xyz_24']\n labels['target_weight_24'] = kwargs['target_weight_24']\n labels['target_weight_29'] = kwargs['target_weight_29']\n labels['target_xyz_17'] = kwargs['target_xyz_17']\n labels['target_weight_17'] = kwargs['target_weight_17']\n labels['target_theta'] = kwargs['target_theta']\n labels['target_beta'] = kwargs['target_beta']\n labels['target_smpl_weight'] = kwargs['target_smpl_weight']\n labels['target_theta_weight'] = kwargs['target_theta_weight']\n labels['target_twist'] = kwargs['target_twist']\n labels['target_twist_weight'] = kwargs['target_twist_weight']\n # flip_output = kwargs.pop('is_flipped', None)\n\n for k, _ in labels.items():\n labels[k] = labels[k].cuda()\n\n trans_inv = labels.pop('trans_inv')\n intrinsic_param = labels.pop('intrinsic_param')\n joint_root = labels.pop('joint_root')\n depth_factor = labels.pop('depth_factor')\n\n if self.backbone is not None:\n img = img.cuda().requires_grad_()\n features = self.backbone(img)\n features = features[0]\n else:\n features = img['features']\n\n if self.neck is not None:\n features = self.neck(features)\n\n predictions = self.head(features, trans_inv, intrinsic_param,\n joint_root, depth_factor, self.smpl)\n\n losses = 
self.compute_losses(predictions, labels)\n\n return losses", "def forward(self, x):\n out = self.pre_processing(x)\n out = self.a3(out)\n out = self.b3(out)\n out = self.maxpool(out)\n out = self.a4(out)\n out = self.b4(out)\n out = self.c4(out)\n out = self.d4(out)\n out = self.e4(out)\n out = self.maxpool(out)\n out = self.a5(out)\n out = self.b5(out)\n out = self.avgpool(out)\n out = out.view(out.size(0), -1) # reshape the output tensor\n out = self.linear(out)\n\n return out", "def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def forward(self,bottom,top):\n # imgmaps = self.batch_loader.mixup_gen()\n # print(\"DataLayer forward!!\")\n trainX, trainY = self.batch_loader.batch_imgs()\n # print(\"trainX:\",trainX.shape)\n # print(\"trainY:\",trainY.shape)\n # print(\"trainY:\", trainY)\n # print(\"top[0].data.shape:\",top[0].data.shape)\n # print(\"top[1].data.shape:\", top[1].data.shape)\n top[0].data[:, ...] = trainX\n top[1].data[:, ...] = trainY\n # print(\"DataLayer forward!!\")", "def forwardpass_train(self, X):\n # hidden_1\n h1_input = np.dot(X, self.W1) + self.b1\n h1_output = functions.relu(h1_input)\n # hidden_2\n h2_input = np.dot(h1_output, self.W2) + self.b2\n h2_output = functions.relu(h2_input)\n # output\n o_input = np.dot(h2_output, self.W3) + self.b3\n final_output = functions.softmax(o_input)\n return h1_input, h1_output, h2_input, h2_output, final_output", "def forward(self, x):\n lay1 = self.linear1(x)\n lay1 = nn.functional.relu(lay1)\n\n lay2 = self.linear2(lay1)\n lay2 = nn.functional.relu(lay2)\n \n lay3_1 = self.linear3_1(lay2)\n lay3_1 = nn.functional.relu(lay3_1)\n\n ## CHECK HERE TOO!!!\n out_1 = self.linear4_1(lay3_1)\n out_1 = out_1.view(-1, ) # reshape it to a 1d-array\n \n # taken care by BCEWithLogitsLoss\n # out_1 = nn.functional.softmax(out_1, dim=0) \n \n lay3_2 = self.linear3_2(lay2)\n lay3_2 = nn.functional.relu(lay3_2)\n \n out_2 = self.linear4_2(lay3_2)\n \n return out_1, out_2", "def _forward(self, x, X, upto=None):\n if upto is not None: # cannot use 'if upto' here since it is 0-indexed\n # and layer0 is the first layer\n assert 0<=upto<=self._layer_counter\n counter = upto + 1\n else: counter = self._layer_counter\n\n y_previous, Y_previous = x, X\n # TODO: because we always need to compute F_i(X) at each layer i, this\n # is a huge overhead\n # feedforward\n for i in range(counter):\n layer = getattr(self, 'layer'+str(i))\n y, Y = layer(y_previous, Y_previous), layer(Y_previous, Y_previous)\n y_previous, Y_previous = y, Y\n\n return y", "def forward(self, x):\n\n x = F.max_pool2d(F.relu(self.batch_norm1(self.conv1(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm2(self.conv2(x))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm3_b(self.conv3_b(F.relu(self.batch_norm3_a(self.conv3_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm4_b(self.conv4_b(F.relu(self.batch_norm4_a(self.conv4_a(x)))))), 3, stride=2, padding=1)\n x = F.max_pool2d(F.relu(self.batch_norm5_b(self.conv5_b(F.relu(self.batch_norm5_a(self.conv5_a(x)))))), 3, stride=2, padding=1)\n x = self.avg_pool(x).view(-1,512)\n out = self.linear(x)\n\n return out", "def forward_once(self, x):\n output = self.cnn1(x)\n output = output.view(output.size()[0], -1)\n output = self.fc1(output)\n return output", "def train():\n if os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. 
Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n 
weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 
'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def forward(self, x):\n flows_forward, flows_backward = self.get_flow(x)\n b, n, _, h, w = x.size()\n\n # backward branch\n out_l = []\n feat_prop = x.new_zeros(b, self.num_feat, h, w)\n for i in range(n - 1, -1, -1):\n x_i = x[:, i, :, :, :]\n if i < n - 1:\n flow = flows_backward[:, i, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.backward_trunk(feat_prop)\n out_l.insert(0, feat_prop)\n\n # forward branch\n feat_prop = torch.zeros_like(feat_prop)\n for i in range(0, n):\n x_i = x[:, i, :, :, :]\n if i > 0:\n flow = flows_forward[:, i - 1, :, :, :]\n feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))\n\n feat_prop = torch.cat([x_i, feat_prop], dim=1)\n feat_prop = self.forward_trunk(feat_prop)\n\n # upsample\n out = torch.cat([out_l[i], feat_prop], dim=1)\n out = self.lrelu(self.fusion(out))\n out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))\n out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = F.interpolate(x_i, scale_factor=4, mode='bilinear', align_corners=False)\n out += base\n out_l[i] = out\n\n return torch.stack(out_l, dim=1)", "def forward(self, x):\n l0 = self.conv0(x)\n l1 = self.conv1(l0)\n l2 = self.conv2(l1)\n l3 = self.conv3(l2)\n l4 = self.conv4(l3)\n y = self.linear(l4.flatten(1))\n return y, (l1, l2, l3, l4)", "def forward(self, x):\n device = x.device\n x, sources = self._get_sources(x)\n\n # apply multibox head to source layers\n conf = []\n loc = []\n for i, (loc_fn, conf_fn) in enumerate(zip(self.loc, self.conf)):\n l = loc_fn(sources[i]).permute(0, 2, 3, 1).contiguous()\n l = l.view(x.size(0), -1, 4)\n loc.append(l)\n\n c = conf_fn(sources[i]).permute(0, 2, 3, 1).contiguous()\n c = c.view(x.size(0), -1, self.num_classes)\n conf.append(c)\n\n loc = torch.cat(loc, 1)\n conf = torch.cat(conf, 1)\n\n if not self.training:\n conf = F.softmax(conf, -1)\n output = self.detect(loc, conf, self.priors.float().to(device))\n output = self._post_process_inference(output)\n else:\n output = loc, conf, self.priors.to(device)\n\n return output", "def forward(self, x):\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n return self.net(x)", "def forward(self, x):\n # encode\n encode_block1 = self.conv_encode1(x)\n encode_pool1 = self.conv_maxpool1(encode_block1)\n encode_block2 = self.conv_encode2(encode_pool1)\n encode_pool2 = self.conv_maxpool2(encode_block2)\n encode_block3 = self.conv_encode3(encode_pool2)\n encode_pool3 = self.conv_maxpool3(encode_block3)\n # Bottleneck\n bottleneck1 = self.bottleneck(encode_pool3)\n # Decode\n decode_block3 = crop_and_concat(\n bottleneck1, encode_block3, crop=True)\n cat_layer2 = self.conv_decode3(decode_block3)\n decode_block2 = crop_and_concat(\n cat_layer2, encode_block2, crop=True)\n cat_layer1 = self.conv_decode2(decode_block2)\n decode_block1 = crop_and_concat(\n cat_layer1, encode_block1, crop=True)\n final_layer = self.final_layer(decode_block1)\n return final_layer", "def forward(self, x):\n\n x = self.first_conv_layer(x)\n x = self.second_conv_layer(x)\n x = self.third_conv_layer(x)\n x = 
self.fourth_conv_layer(x)\n x = self.fifth_conv_layer(x)\n\n '''\n x = x.view(-1, 4 * 4 * 512)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n '''\n\n sigmoid_out = nn.functional.sigmoid(x)\n\n return sigmoid_out", "def forward(self, data):\n template = data['template'].cuda()\n search = data['search'].cuda()\n label_cls = data['label_cls'].cuda()\n label_loc = data['label_loc'].cuda()\n\n # get feature\n zf = self.backbone(template)\n xf = self.backbone(search)\n\n if cfg.ADJUST.ADJUST:\n zf = self.neck(zf)\n xf = self.neck(xf)\n\n cls, loc = self.head(zf, xf)\n\n # get loss\n # cls loss with cross entropy loss\n cls_loss = self.select_cross_entropy_loss(cls, label_cls)\n\n # loc loss with iou loss\n loc_loss = self.select_iou_loss(loc, label_loc, label_cls)\n\n outputs = {\n 'total_loss':\n cfg.TRAIN.CLS_WEIGHT * cls_loss +\n cfg.TRAIN.LOC_WEIGHT * loc_loss,\n 'cls_loss': cls_loss,\n 'loc_loss': loc_loss\n }\n\n return outputs", "def forward(self, x):\n x1 = x[:, 0, :, :].reshape((-1, 1, obs_size * 2 + 1, obs_size * 2 + 1))\n x2 = x[:, 1, :, :].reshape((-1, (obs_size * 2 + 1) ** 2))\n if x2.shape[0] == 1:\n x2 = np.tile(x2, (minibatch_size, 1))\n h = F.relu(self.bn1(self.conv1(x)))\n h = F.relu(self.bn2(self.conv2(x)))\n h = F.relu(self.bn3(self.conv3(x)))\n h = self.l(h)\n return DiscreteActionValue(h)", "def forward(self, state):\n '''\n state = F.relu(self.conv1(state))\n state = F.relu(self.conv2(state))\n state = F.relu(self.conv3(state))\n state = F.relu(self.fc1(state))\n \n action = F.relu(self.fc2(state))\n \n return action\n '''\n \n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x", "def test_forward(self):\r\n # CIFAR\r\n model = densenet.DenseNet(\r\n depth=40,\r\n Block=densenet.BasicBlock,\r\n growth_rate=12,\r\n mask=True,\r\n compression_rate=1.0,\r\n num_classes=100,\r\n )\r\n model.forward(torch.randn((1, 3, 32, 32)))", "def forward(self, x):\n c_out = self.conv_net.forward(x)\n\n c_out_flat = c_out.flatten(start_dim=1)\n \n \n return self.linear.forward(c_out_flat)", "def forward(self, x: torch.Tensor) -> torch.Tensor:\r\n assert x.dim() == 4, \\\r\n \"Input should have 4 dimensions. 
Was {}\".format(x.dim())\r\n\r\n return self.net(x)", "def forward(self, x): \n out = self.layer1(x)\n out = self.layer2(out)\n\n out = out.reshape(out.size(0), -1)\n \n out = self.dropout(out)\n out = self.fc1(out)\n out = self.fc2(out)\n \n return out", "def forward(self, x):\n # Convolutional Layers\n ## add pooling layers\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))\n x = x.view(-1, 256) # flatten to pass to fully connected layers\n\n # fully connected layers\n ## and dropout layers\n x = F.relu(self.dropout(self.fc1(x)))\n x = F.relu(self.dropout(self.fc2(x)))\n x = self.fc3(x)\n\n return x", "def singlestagemono3ddetector__forward(self, inputs: list, **kwargs):\n x = self.extract_feat(inputs)\n results = self.bbox_head.forward(x)\n return results[0], results[1]", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, out):\n\n # 0th layer.\n index = 0\n out = self.pad_out(out, index)\n out = self.conv0(out)\n out = self.bn0(out)\n out = self.relu(out)\n\n # 1st layer.\n index = 1\n out = self.pad_out(out, index)\n out = self.conv1(out)\n out = self.bn1(out)\n out = self.relu(out)\n\n # 2nd layer.\n index = 2\n out = self.pad_out(out, index)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n # Classification.\n # Average across the channels.\n # https://discuss.pytorch.org/t/global-average-pooling-in-pytorch/6721/4\n # In Keras it is implemented as: K.mean(inputs, axis=1). The channel is\n # the last dimension in Keras.\n out = torch.mean(out, dim=2)\n out = self.lin(out)\n\n # To imitate the cross entropy loss with the nll (negative log\n # likelihood) loss.\n out = log_softmax(out, dim=-1)\n\n return out", "def forward(self, inputs, end_points, mode=\"\"):\n batch_size = inputs['point_clouds'].shape[0]\n\n end_points = self.backbone_net1(inputs['point_clouds'], end_points)\n end_points = self.backbone_net2(inputs['point_clouds'], end_points, mode='net1')\n end_points = self.backbone_net3(inputs['point_clouds'], end_points, mode='net2')\n end_points = self.backbone_net4(inputs['point_clouds'], end_points, mode='net3')\n\n ### Extract feature here\n xyz = end_points['fp2_xyz']\n features1 = end_points['fp2_features']\n features2 = end_points['fp2_features'+'net1']\n features3 = end_points['fp2_features'+'net2']\n features4 = end_points['fp2_features'+'net3']\n end_points['seed_inds'] = end_points['fp2_inds']\n end_points['seed_xyz'] = xyz\n end_points['seed_features'] = features1\n \n ### Combine the feature here\n features_hd_discriptor = torch.cat((features1, features2, features3, features4), dim=1)\n features_hd_discriptor = F.relu(self.bn_agg1(self.conv_agg1(features_hd_discriptor)))\n features_hd_discriptor = F.relu(self.bn_agg2(self.conv_agg2(features_hd_discriptor)))\n\n end_points['hd_feature'] = features_hd_discriptor\n \n net_flag_z = F.relu(self.bn_flag_z1(self.conv_flag_z1(features_hd_discriptor)))\n net_flag_z = self.conv_flag_z2(net_flag_z)\n end_points[\"pred_flag_z\"] = net_flag_z\n\n net_flag_xy = F.relu(self.bn_flag_xy1(self.conv_flag_xy1(features_hd_discriptor)))\n net_flag_xy = self.conv_flag_xy2(net_flag_xy)\n end_points[\"pred_flag_xy\"] = net_flag_xy\n\n net_flag_line = F.relu(self.bn_flag_line1(self.conv_flag_line1(features_hd_discriptor)))\n net_flag_line = self.conv_flag_line2(net_flag_line)\n end_points[\"pred_flag_line\"] = net_flag_line\n\n proposal_xyz, proposal_features, center_offset, center_residual 
= self.vgen(xyz, features_hd_discriptor)\n proposal_features_norm = torch.norm(proposal_features, p=2, dim=1)\n proposal_features = proposal_features.div(proposal_features_norm.unsqueeze(1))\n end_points['vote_xyz'] = proposal_xyz\n end_points['vote_features'] = proposal_features\n \n voted_z, voted_z_feature, z_offset, z_residual = self.vgen_z(xyz, features_hd_discriptor)\n voted_z_feature_norm = torch.norm(voted_z_feature, p=2, dim=1)\n voted_z_feature = voted_z_feature.div(voted_z_feature_norm.unsqueeze(1))\n end_points['vote_z'] = voted_z\n end_points['vote_z_feature'] = voted_z_feature\n\n voted_xy, voted_xy_feature, xy_offset, xy_residual = self.vgen_xy(xyz, features_hd_discriptor)\n voted_xy_feature_norm = torch.norm(voted_xy_feature, p=2, dim=1)\n voted_xy_feature = voted_xy_feature.div(voted_xy_feature_norm.unsqueeze(1))\n end_points['vote_xy'] = voted_xy\n end_points['vote_xy_feature'] = voted_xy_feature\n\n voted_line, voted_line_feature, line_offset, line_residual = self.vgen_line(xyz, features_hd_discriptor)\n voted_line_feature_norm = torch.norm(voted_line_feature, p=2, dim=1)\n voted_line_feature = voted_line_feature.div(voted_line_feature_norm.unsqueeze(1))\n end_points['vote_line'] = voted_line\n end_points['vote_line_feature'] = voted_line_feature\n \n center_z, feature_z, end_points = self.pnet_z(voted_z, voted_z_feature, end_points, mode='_z')\n center_xy, feature_xy, end_points = self.pnet_xy(voted_xy, voted_xy_feature, end_points, mode='_xy')\n center_line, feature_line, end_points = self.pnet_line(voted_line, voted_line_feature, end_points, mode='_line')\n\n end_points = self.pnet_final(proposal_xyz, proposal_features, center_z, feature_z, center_xy, feature_xy, center_line, feature_line, end_points)\n return end_points", "def forward(self, X, training=False):\n pass", "def forward(self, x):\n n, t, c, h, w = x.size()\n assert h % 4 == 0 and w % 4 == 0, (\n 'The height and width of inputs should be a multiple of 4, '\n f'but got {h} and {w}.')\n\n x_center = x[:, self.center_frame_idx, :, :, :].contiguous()\n\n # extract LR features\n # L1\n l1_feat = self.lrelu(self.conv_first(x.view(-1, c, h, w)))\n l1_feat = self.feature_extraction(l1_feat)\n # L2\n l2_feat = self.feat_l2_conv2(self.feat_l2_conv1(l1_feat))\n # L3\n l3_feat = self.feat_l3_conv2(self.feat_l3_conv1(l2_feat))\n\n l1_feat = l1_feat.view(n, t, -1, h, w)\n l2_feat = l2_feat.view(n, t, -1, h // 2, w // 2)\n l3_feat = l3_feat.view(n, t, -1, h // 4, w // 4)\n\n # pcd alignment\n ref_feats = [ # reference feature list\n l1_feat[:, self.center_frame_idx, :, :, :].clone(),\n l2_feat[:, self.center_frame_idx, :, :, :].clone(),\n l3_feat[:, self.center_frame_idx, :, :, :].clone()\n ]\n aligned_feat = []\n for i in range(t):\n neighbor_feats = [\n l1_feat[:, i, :, :, :].clone(), l2_feat[:, i, :, :, :].clone(),\n l3_feat[:, i, :, :, :].clone()\n ]\n aligned_feat.append(self.pcd_alignment(neighbor_feats, ref_feats))\n aligned_feat = torch.stack(aligned_feat, dim=1) # (n, t, c, h, w)\n\n if self.with_tsa:\n feat = self.fusion(aligned_feat)\n else:\n aligned_feat = aligned_feat.view(n, -1, h, w)\n feat = self.fusion(aligned_feat)\n\n # reconstruction\n out = self.reconstruction(feat)\n out = self.lrelu(self.upsample1(out))\n out = self.lrelu(self.upsample2(out))\n out = self.lrelu(self.conv_hr(out))\n out = self.conv_last(out)\n base = self.img_upsample(x_center)\n out += base\n return out", "def forward(self, *args, mode=\"train\", **kwargs):\n raise NotImplementedError", "def forward(self, x):\n h = self.l1(x)\n h 
= h.view(x.shape[0], -1, self.bottom_width, self.bottom_width)\n h = self.block2(h)\n h = self.block3(h)\n h = self.block4(h)\n h = self.b5(h)\n h = self.activation(h)\n h = torch.tanh(self.c5(h))\n\n return h", "def forward(self, images):\n x0 = self.lrelu(self.bn0(self.conv0(images)))\n x1 = self.lrelu(self.bn1(self.conv1(x0)))\n x2 = self.lrelu(self.bn2(self.conv2(x1)))\n x3 = self.lrelu(self.bn3(self.conv3(x2)))\n x4 = self.lrelu(self.bn4(self.conv4(x3)))\n x5 = self.lrelu(self.bn5(self.conv5(x4)))\n\n # x1 = self.lrelu(self.bn1(self.conv1(x0)))\n out = x5\n return out", "def _forward(self, z):\n raise NotImplementedError(\"Forward shouldn't be called!\")", "def forwardPropagate (self, x):\n\t\tif type(x) is not list:\n\t\t\tx = [x]\n\t\tx = np.concatenate((np.array([1]),np.array(x)),axis=0) # absorb w_0\n\n\t\t# No transformation, but needed later\n\t\tfor i in range(self.inn):\n\t\t\tself.z_in[i] = x[i]\n\t\t\n\t\t# For every hidden neuron (1 hidden layer only!)\n\t\tfor j in range(self.hidden):\n\t\t\tsumIn = 0\n\t\t\tfor i in range(self.inn):\n\t\t\t\tsumIn += self.w_hd[j][i]*self.z_in[i]\n\t\t\tself.a_hd[j] = sumIn # Needed for backprop (5.56)\n\t\t\tself.z_hd[j] = self.act(sumIn)\n\n\t\t# For every output neuron\n\t\tfor k in range(self.out):\n\t\t\tsumHdn = 0\n\t\t\tfor j in range(self.hidden):\n\t\t\t\tsumHdn += self.w_out[k][j]*self.z_hd[j]\n\t\t\tself.z_out[k] = sumHdn\n\t\treturn self.z_out", "def forward(self, x):\n x = tensor(x).unsqueeze(1)\n x = self.cnn(x)\n\n # LSTM from here\n batch_size = x.shape[0]\n x = x.view(batch_size, x.shape[1] * x.shape[2], x.shape[3])\n x = x.permute(2, 0, 1) # Converting from (B,H,W)->(W,B,H)\n\n output = self.rnn(x)\n return output", "def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return torch.clamp(x, -1, 1)", "def forward(self, x, h, u, time, feat_kernels_enc_conv, feat_bias_enc_conv, feat_kernels_enc_fc, feat_bias_enc_fc, feat_kernels_enc_3dgru, feat_bias_enc_3dgru):\n\n\n conv1a_wt,conv1b_wt,conv2a_wt,conv2b_wt,conv2c_wt,conv3a_wt,conv3b_wt,conv3c_wt,conv4a_wt,conv4b_wt,conv5a_wt,conv5b_wt,conv5c_wt,conv6a_wt,conv6b_wt = feat_kernels_enc_conv\n conv1a_bias,conv1b_bias,conv2a_bias,conv2b_bias,conv2c_bias,conv3a_bias,conv3b_bias,conv3c_bias,conv4a_bias,conv4b_bias,conv5a_bias,conv5b_bias,conv5c_bias,conv6a_bias,conv6b_bias = feat_bias_enc_conv\n t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_rs_fc_layer, t_x_rs_conv3d = feat_kernels_enc_3dgru\n t_x_s_update_bias, t_x_s_reset_bias, t_x_rs_bias = feat_bias_enc_3dgru\n\n conv1a = F.conv2d(x, conv1a_wt, bias=conv1a_bias, padding=3) #self.conv1a(x)\n rect1a = self.leaky_relu(conv1a)\n conv1b = F.conv2d(rect1a, conv1b_wt, bias=conv1b_bias, padding=1) #self.conv1b(rect1a)\n rect1 = self.leaky_relu(conv1b)\n pool1 = self.pool(rect1)\n \n \n conv2a = F.conv2d(pool1, conv2a_wt, bias=conv2a_bias, padding=1) #self.conv2a(pool1)\n rect2a = self.leaky_relu(conv2a)\n conv2b = F.conv2d(rect2a, conv2b_wt, bias=conv2b_bias, padding=1) #self.conv2b(rect2a)\n rect2 = self.leaky_relu(conv2b)\n conv2c = F.conv2d(pool1, conv2c_wt, bias=conv2c_bias) #self.conv2c(pool1)\n res2 = conv2c + rect2\n pool2 = self.pool(res2)\n \n \n conv3a = F.conv2d(pool2, conv3a_wt, bias=conv3a_bias, padding=1) #self.conv3a(pool2)\n rect3a = self.leaky_relu(conv3a)\n conv3b = F.conv2d(rect3a, conv3b_wt, bias=conv3b_bias, padding=1) #self.conv3b(rect3a)\n rect3 = self.leaky_relu(conv3b)\n 
conv3c = F.conv2d(pool2, conv3c_wt, bias=conv3c_bias) #self.conv3c(pool2)\n res3 = conv3c + rect3\n pool3 = self.pool(res3)\n \n conv4a = F.conv2d(pool3, conv4a_wt, bias=conv4a_bias, padding=1) #self.conv4a(pool3)\n rect4a = self.leaky_relu(conv4a)\n conv4b = F.conv2d(rect4a, conv4b_wt, bias=conv4b_bias, padding=1) #self.conv4b(rect4a)\n rect4 = self.leaky_relu(conv4b)\n pool4 = self.pool(rect4)\n \n \n conv5a = F.conv2d(pool4, conv5a_wt, bias=conv5a_bias, padding=1) #self.conv5a(pool4)\n rect5a = self.leaky_relu(conv5a)\n conv5b = F.conv2d(rect5a, conv5b_wt, bias=conv5b_bias, padding=1) #self.conv5b(rect5a)\n rect5 = self.leaky_relu(conv5b)\n conv5c = F.conv2d(pool4, conv5c_wt, bias=conv5c_bias) #self.conv5c(pool4)\n res5 = conv5c + rect5\n pool5 = self.pool(res5)\n \n \n conv6a = F.conv2d(pool5, conv6a_wt, bias=conv6a_bias, padding=1) #self.conv6a(pool5)\n rect6a = self.leaky_relu(conv6a)\n conv6b = F.conv2d(rect6a, conv6b_wt, bias=conv6b_bias, padding=1) #self.conv6b(rect6a)\n rect6 = self.leaky_relu(conv6b)\n res6 = pool5 + rect6\n pool6 = self.pool(res6)\n \n \n pool6 = pool6.view(pool6.size(0), -1)\n \n \n fc7 = F.linear(pool6, feat_kernels_enc_fc[0], bias=feat_bias_enc_fc[0]) #self.fc7(pool6)\n rect7 = self.leaky_relu(fc7)\n \n t_x_s_update = self.t_x_s_update(rect7, h, t_x_s_update_fc_layer, t_x_s_update_conv3d, t_x_s_update_bias)\n t_x_s_reset = self.t_x_s_reset(rect7, h, t_x_s_reset_fc_layer, t_x_s_reset_conv3d, t_x_s_reset_bias)\n \n update_gate = self.sigmoid(t_x_s_update)\n complement_update_gate = 1 - update_gate\n reset_gate = self.sigmoid(t_x_s_reset)\n \n rs = reset_gate * h\n t_x_rs = self.t_x_rs(rect7, rs, t_x_rs_fc_layer, t_x_rs_conv3d, t_x_rs_bias)\n tanh_t_x_rs = self.tanh(t_x_rs)\n \n gru_out = update_gate * h + complement_update_gate * tanh_t_x_rs\n \n return gru_out, update_gate", "def forward(self, x):\n x = self.efficient_net(x)\n return x", "def forward_graph(self):\n raise NotImplementedError", "def forward(self, x): # pylint: disable=invalid-name\n x = self.layer4(self.layer3(self.layer2(self.layer1(x))))\n return x.mean((-2, -1))", "def forward(self, output, target):\n raise NotImplementedError" ]
[ "0.65314513", "0.6292418", "0.6263829", "0.6261159", "0.62541807", "0.6251553", "0.6219152", "0.62162906", "0.61927277", "0.6178997", "0.6141839", "0.6136382", "0.6127676", "0.6119838", "0.6111306", "0.61022437", "0.6099669", "0.60930973", "0.60845137", "0.60679704", "0.60615677", "0.6059268", "0.6059033", "0.6057288", "0.60497737", "0.6039589", "0.60393703", "0.6035947", "0.6032868", "0.60318774", "0.60292166", "0.60262567", "0.60207886", "0.60191494", "0.60020494", "0.5977448", "0.59735554", "0.5970075", "0.5970075", "0.5970075", "0.59677905", "0.59625787", "0.5960974", "0.59498614", "0.59477663", "0.59471023", "0.59463966", "0.59337074", "0.59253246", "0.5924014", "0.5922054", "0.59163886", "0.5914745", "0.5911501", "0.5906483", "0.5899833", "0.58966583", "0.58965474", "0.58907795", "0.58905065", "0.58866787", "0.58850026", "0.58813673", "0.5880616", "0.5872259", "0.58681285", "0.58670133", "0.5865918", "0.5861004", "0.58488494", "0.58441967", "0.5836523", "0.58314735", "0.5829589", "0.58220184", "0.5819596", "0.58182013", "0.5813271", "0.5808285", "0.58007693", "0.58000565", "0.5793291", "0.57926536", "0.57926536", "0.57917297", "0.5789312", "0.5785124", "0.5782238", "0.57700837", "0.57653457", "0.57642305", "0.57603157", "0.5759451", "0.5759258", "0.57588327", "0.57564145", "0.5756222", "0.5748082", "0.57451844", "0.5744772" ]
0.7524717
0
Set non-maximum suppression parameters.
def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100): self._clear_cached_op() self.nms_thresh = nms_thresh self.nms_topk = nms_topk self.post_nms = post_nms
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})", "def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs", "def set_cycle_suppression(self):\n self._cyclesuppression = True\n self.suppression_used = False", "def non_heap_max(self, non_heap_max):\n\n self._non_heap_max = non_heap_max", "def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)", "def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)", "def box_non_maximum_suppression(data=None, overlap_thresh=_Null, topk=_Null, coord_start=_Null, score_index=_Null, id_index=_Null, force_suppress=_Null, in_format=_Null, out_format=_Null, out=None, name=None, **kwargs):\n return (0,)", "def SetLimit(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetLimit(self, *args)", "def setPTLimits(*args):\n args[0].Limit.PTLimit.pt_limit = args[1]", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper", "def suppress_pd(pars):\n pars = pars.copy()\n for p in pars:\n if p.endswith(\"_pd_n\"): pars[p] = 0\n return pars", "def _build_non_max_suppressor(type):\n\n if type == model_config.SSD:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n elif type == model_config.FASTER_RCNN:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n else:\n raise ValueError('type must be ssd or faster_rcnn string')\n\n if iou_threshold < 0 or iou_threshold > 1.0:\n raise ValueError('iou_threshold not in [0, 1.0].')\n if max_detections_per_class > max_total_detections:\n raise ValueError('max_detections_per_class should be no greater than '\n 'max_total_detections.')\n\n non_max_suppressor_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=score_threshold,\n iou_thresh=iou_threshold,\n max_size_per_class=max_detections_per_class,\n max_total_size=max_total_detections)\n\n return non_max_suppressor_fn", "def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")", "def non_max_suppression(pred_bboxes, pred_labels, **kwargs):\n return tf.image.combined_non_max_suppression(\n pred_bboxes,\n pred_labels,\n **kwargs\n )", "def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None", "def set_max_nb_instructions(nb): #py:set_max_nb_instructions\n RUR._set_max_nb_instructions_(nb)", "def set_silent(self, **kw):\n self._set_attr(silent=True, **kw)", "def 
non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2,2)\n local_max = maximum_filter(image, footprint=neighborhood)==image\n local_max[image<(image.max()*0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num)+1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:,0], centers[:,1]] = True\n\n return ret", "def limit_plasma(self, n_min=1e11, n_max=1e22, T_min=0.001, T_max=100.0):\n self.ne = np.clip(self.ne, n_min, n_max)\n self.ni = np.clip(self.ni, n_min, n_max)\n self.nn = np.clip(self.nn, n_min, n_max)\n self.Te = np.clip(self.Te, T_min, T_max)\n self.Ti = np.clip(self.Ti, T_min, T_max)", "def set_params(self, maxn=None, minn=None):\n if maxn is not None:\n self._maxn = maxn\n if minn is not None:\n self._minn = minn", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"n_sub_images\" in vars(self):\n self.n_sub_images = -1 # do all-sub-images", "def suppress(self, t, w=None):\n return super(SmartCentroidPublisher, self).suppress(t, w)", "def noisePreset() :\n s.noisePreset()", "def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)", "def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']", "def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret", "def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret", "def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))", "def setSilent(self) -> None:\n ...", "def noiseoff(subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, False, False)\n multiSubarray('rfPower', subarray, True)", "def invalidate_min_max(self):\n self.max_amplitude = None\n self.min_amplitude = None\n self.max_wavenumber = None\n self.min_wavenumber = None", "def set_samples(samples):\n if samples is not None and not isinstance(samples, int):\n raise TypeError('samples must be an int or None')\n elif isinstance(samples, int) and samples < 1:\n raise ValueError('samples must be positive')\n else:\n __SETTINGS__._SAMPLES = samples", "def security_policy_num_not(self, security_policy_num_not):\n\n self._security_policy_num_not = security_policy_num_not", "def setLSLimits(*args):\n args[0].Limit.LSLimit.ls_limit = args[1]", "def suppress(self):\n pass", "def setwarnings(self, on):\n # diese Funktion macht eigentlich nichts, ist aber wegen der Kombatibilitaet vorhanden\n print(f\"setwarnings: {on}\")", "def postproc_disable(self):\n self.write(\":CALC:MATH:STATE 
OFF\")\n self.write(\":CALC2:LIM:STATE OFF\")\n self.write(\":CALC3:AVER:STATE OFF\")", "def ignores(self, value):\n if value is None:\n value = tuple()\n self._ignores = value", "def feedback_suppression(self, k, z, log_Mc, eta_b, z_c):\n K,Z = np.meshgrid(k,z)\n\n # Model is valid only for eta_b > 0\n if eta_b <= 0.: raise ValueError(\"eta_b must be grater than 0.\")\n\n # Stellar component\n ks = 55.\n stellar = 1. + (K/ks)**2.\n \n # Baryon suppression\n B0 = 0.105*log_Mc - 1.27\n assert B0>0., \"log_Mc must be grater than 12.096\"\n B = B0*1./(1.+(Z/z_c)**2.5)\n\n k_g = 0.7*((1.-B)**4.)*eta_b**(-1.6)\n scale_ratio = K/k_g\n\n suppression = B/(1.+scale_ratio**3.)+(1.-B)\n\n return suppression*stellar", "def _set_max_suppress_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name=\"max-suppress-time\", rest_name=\"max-suppress-time\", parent=self, choice=(u'ch-dampening-source', u'ca-dampening-specify-values'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='damp-max-suppress-value', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"max_suppress_time must be of a type compatible with damp-max-suppress-value\"\"\",\n 'defined-type': \"brocade-bgp:damp-max-suppress-value\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..255']}), is_leaf=True, yang_name=\"max-suppress-time\", rest_name=\"max-suppress-time\", parent=self, choice=(u'ch-dampening-source', u'ca-dampening-specify-values'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='damp-max-suppress-value', is_config=True)\"\"\",\n })\n\n self.__max_suppress_time = t\n if hasattr(self, '_set'):\n self._set()", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def set_photon_counting_thres(self, mini, maxi):\n self.lib.SetPhotonCountingThreshold(ct.c_long(mini), ct.c_long(maxi))", "def set_default_parameters(self):\n super().set_default_parameters()\n self.n_threads = 4\n if not \"n_sub_images\" in vars(self):\n self.n_sub_images = -1 # do all-sub-images", "def nop_minifier(arg):\n return arg", "def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = change_box_order(prediction[..., :4], order=\"xywh2xyxy\")\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]\n # Sort by it\n image_pred = 
image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)\n detections = torch.cat(\n (image_pred[:, :5], class_confs.float(), class_preds.float()), 1\n )\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = (\n box_iou(detections[0, :4].unsqueeze(0), detections[:, :4], order=\"xyxy\")\n > nms_thres\n )\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(\n 0\n ) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n return output", "def set_as_not_feedback(self):\n self.feedback = False", "def ignores(self, value):\n value += self.__default_ignores\n tags, attributes = self._process_ignores(value)\n self.__ignores = list([tags, attributes])", "def setTCLimits(*args):\n args[0].Limit.TCLimit.tc_limit = args[1]", "def setIgnoreNumbers(self, value):\n self.setBooleanOption(1, value)", "def set_n(self, value):\n\n # set the negative register if greater than 0x80\n self.p &= ~(const.FLAG_NEGATIVE)\n self.p |= const.FLAG_NEGATIVE if value >= 0x80 else 0b0", "def setNoOfCaptures(self, noCaptures):\n self._lowLevelSetNoOfCaptures(noCaptures)", "def strict_limit(self, strict_limit):\n\n self._strict_limit = strict_limit", "def suppressMessages():\n dislin.unit(0)", "def set_proba(self):\n self.__c_elem().log_normalise()", "def suppression(y_subs, buckets, its, windows):\n\n for i in range(its):\n w0 = windows[i]\n\n for j in range(1, buckets):\n v = min(j, w0, buckets-j)\n a = np.mean(y_subs[j-v:j+v+1])\n y_subs[j] = min(a, y_subs[j])\n\n for j in range(buckets-1, 0, -1):\n v = min(j, w0, buckets-j)\n a = np.mean(y_subs[j-v:j+v+1])\n y_subs[j] = min(a, y_subs[j])\n\n return y_subs", "def __init__(self):\n super().__init__()\n self.nan_penalty = nan_penalty\n self.nan_tol = nan_tol", "def non_max_suppression(inputs, n_classes, max_output_size, iou_threshold, confidence_threshold):\n batch = tf.unstack(inputs)\n boxes_dicts = []\n\n for boxes in batch:\n boxes = tf.boolean_mask(boxes, boxes[:, 4] > confidence_threshold)\n classes = tf.argmax(boxes[:, 5:], axis=-1)\n classes = tf.expand_dims(tf.cast(classes, tf.float32), axis=-1)\n boxes = tf.concat([boxes[:, :5], classes], axis=-1)\n\n boxes_dict = dict()\n for cls in range(n_classes):\n mask = tf.equal(boxes[:, 5], cls)\n mask_shape = mask.get_shape()\n if mask_shape.ndims != 0:\n class_boxes = tf.boolean_mask(boxes, mask)\n boxes_coords, boxes_conf_scores, _ = tf.split(class_boxes, [4, 1, -1], axis=-1)\n boxes_conf_scores = tf.reshape(boxes_conf_scores, [-1])\n indices = tf.image.non_max_suppression(boxes_coords,\n boxes_conf_scores,\n max_output_size,\n iou_threshold)\n class_boxes = tf.gather(class_boxes, indices)\n boxes_dict[cls] = class_boxes[:, :5]\n\n boxes_dicts.append(boxes_dict)\n return boxes_dicts", "def reset(self):\n super().reset()\n self.sample_count = 1\n self.miss_prob = 1.0\n self.miss_std = 0.0\n self.miss_prob_sd_min = float(\"inf\")\n self.miss_prob_min = float(\"inf\")\n self.miss_sd_min = float(\"inf\")", "def set_limit(self, errors):\n self.limit = errors", "def set_max_evaluations(self,ev):\n self.max_evaluations = ev", "def exclude(self):\n\n self.eod.value = 0\n 
self.public.value = 0", "def __init__(self,name,maneuverability,protection):\n\t\tsuper().__init__(name)\n\t\tself.maneuverability = super().limitsValues(maneuverability)\n\t\tself.protection = super().limitsValues(protection)", "def SuppressEdgeSet(self, *args):\n return _BRepAlgo.BRepAlgo_DSAccess_SuppressEdgeSet(self, *args)", "def __init__(self, config):\n\n # controls for scope logging\n self.vars = None\n self.log = {}\n self.conf = config\n pe.set_default_val(self.conf, 'clip_by_norm', 0.3)", "def reset_parameters(self):\n self.lin.reset_parameters()\n self.att.reset_parameters()\n self.gnn_score.reset_parameters()\n if self.gnn_intra_cluster is not None:\n self.gnn_intra_cluster.reset_parameters()\n self.select.reset_parameters()", "def non_heap_used(self, non_heap_used):\n\n self._non_heap_used = non_heap_used", "def num_process_noise_parameters(self):\n return self._num_process_noise_parameters", "def fixupMaxEvents(process):\n if not hasattr(process, \"maxEvents\"):\n process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))\n if not hasattr(process.maxEvents, \"input\"):\n process.maxEvents.input = cms.untracked.int32(-1)\n return", "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):\n nc = prediction.shape[2] - 5\n xc = prediction[..., 4] > conf_thres\n min_wh, max_wh = 2, 4096\n max_det = 300\n max_nms = 30000\n time_limit = 10.0\n redundant = True\n multi_label &= nc > 1\n merge = False\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction):\n x = x[xc[xi]]\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5]\n v[:, 4] = 1.0\n v[range(len(l)), l[:, 0].long() + 5] = 1.0\n x = torch.cat((x, v), 0)\n if not x.shape[0]:\n continue\n x[:, 5:] *= x[:, 4:5]\n box = xywh2xyxy(x[:, :4])\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else:\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n n = x.shape[0]\n if not n:\n continue\n elif n > max_nms:\n x = x[x[:, 4].argsort(descending=True)[:max_nms]]\n c = x[:, 5:6] * (0 if agnostic else max_wh)\n boxes, scores = x[:, :4] + c, x[:, 4]\n i = torchvision.ops.nms(boxes, scores, iou_thres)\n if i.shape[0] > max_det:\n i = i[:max_det]\n if merge and 1 < n < 3000.0:\n iou = box_iou(boxes[i], boxes) > iou_thres\n weights = iou * scores[None]\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)\n if redundant:\n i = i[iou.sum(1) > 1]\n output[xi] = x[i]\n if time.time() - t > time_limit:\n None\n break\n return output", "def reject_test(self):\n self.__genes_test = None\n self.__fitness_test = None", "def SetAntLimit(cls, value=0):\n cls.antLimit = value", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def __init__(self, nuCtrl, nuNoise, limL=None, limU=None):\n \n if __debug__:\n assert nuCtrl >=0\n assert nuNoise >= 0\n \n limLUisNone = (limL is None, limU is None)\n \n super(boxInputCstrNoised, self).__init__(nuCtrl+nuNoise, limL, limU)\n \n if limLUisNone[0]:\n self.limL[nuCtrl:,0] *= -1.\n self.thislimL = self.limL\n if limLUisNone[1]:\n 
self.limU[nuCtrl:,0] *= -1.\n self.thislimU = self.limU", "def _reset_parameters(self):\r\n\t\tfor p in self.parameters():\r\n\t\t\tif p.dim() > 1:\r\n\t\t\t\txavier_uniform_(p)", "def set_max_nb_robots(nb): #py:set_max_nb_robots\n RUR._set_max_nb_robots_(nb)", "def unconstrain_positive(self):\n self.unconstrain(Logexp())", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def set_limit(self, limit):\n self.limit = limit\n self._prune()", "def set_verbosity(self,verbosity):\n type_name = type(verbosity).__name__\n if re.search('int',type_name) != None:\n \n # It is an integer, tes bounds\n if verbosity < 4 and verbosity > -1:\n self.verbosity = verbosity\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be either 0, 1, 2 or 3.\")\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be an integer.\")", "def libc_prctl_pr_set_no_new_privs(i: int) -> None:\n _call_c_style(libc, \"prctl\", PR_SET_NO_NEW_PRIVS, i, 0, 0, 0)", "def warning(self, *args, **kwargs):", "def setInfinity(*args, attribute: Union[AnyStr, List[AnyStr]]=\"\", controlPoints: bool=False,\n hierarchy: AnyStr=\"\", postInfinite: Union[AnyStr, bool]=\"\", preInfinite:\n Union[AnyStr, bool]=\"\", shape: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def setUncertainty(self, uncertainty):\n self.uncertainty = uncertainty", "def setDiscardFlags(self, flags):\r\n self.__data.discardFlags = flags", "def _suppress(self, key):\n return key in self.SUPPRESS", "def __init__(self, min_cut=0.1, max_cut=0.9):\n self._min_cut = min_cut\n self._max_cut = max_cut\n self._stopwords = set(stopwords.words('english') + list(punctuation))", "def no_of_dofs_unconstrained(self, new_no_of_dofs_unconstrained):\n self._no_of_dofs_unconstrained = new_no_of_dofs_unconstrained\n self._update_flag = True", "def __init__(self, min_cut=0.1, max_cut=0.9):\n self._min_cut = min_cut\n self._max_cut = max_cut\n self._stopwords = set(stopwords.words('english') + list(punctuation))", "def removeExplicitThisParameters(self, monitor: ghidra.util.task.TaskMonitor) -> None:\n ...", "def removeAssignmentNotifiers():\n\n assignmentNotifiers(Implementation.unregisterNotify)", "def get_nuisance_parameters(self):\n pass", "def set_noptr_lvar(self, *args):\n return _ida_hexrays.vdui_t_set_noptr_lvar(self, *args)", "def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms", "def unsetSeverityOverride(self):\n return _libsbml.XMLErrorLog_unsetSeverityOverride(self)", "def set_warnings(numpy_level='ignore', astropy_level='ignore'):\n from astropy.utils.exceptions import AstropyWarning\n \n np.seterr(all=numpy_level)\n warnings.simplefilter(astropy_level, 
category=AstropyWarning)", "def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)", "def __init__(self, ApparentPowerLimits=None, *args, **kw_args):\n self._ApparentPowerLimits = []\n self.ApparentPowerLimits = [] if ApparentPowerLimits is None else ApparentPowerLimits\n\n super(ApparentPowerLimitSet, self).__init__(*args, **kw_args)", "def disable_default_cuts(gmodel):\n gmodel.setParam('PreCrush', 1)\n gmodel.setParam(GRB.Param.CoverCuts,0)\n gmodel.setParam(GRB.Param.CliqueCuts,0)\n gmodel.setParam(GRB.Param.FlowCoverCuts,0)\n gmodel.setParam(GRB.Param.FlowPathCuts,0)\n gmodel.setParam(GRB.Param.GUBCoverCuts,0)\n gmodel.setParam(GRB.Param.ImpliedCuts,0)\n gmodel.setParam(GRB.Param.InfProofCuts,0)\n gmodel.setParam(GRB.Param.MIPSepCuts,0)\n gmodel.setParam(GRB.Param.MIRCuts,0)\n gmodel.setParam(GRB.Param.ModKCuts,0)\n gmodel.setParam(GRB.Param.NetworkCuts,0)\n gmodel.setParam(GRB.Param.ProjImpliedCuts,0)\n gmodel.setParam(GRB.Param.StrongCGCuts,0)\n gmodel.setParam(GRB.Param.SubMIPCuts,0)\n gmodel.setParam(GRB.Param.ZeroHalfCuts,0)\n gmodel.setParam(GRB.Param.GomoryPasses,0)", "def restoreTrackThreshold(ants=0, subarray=DEFAULT) :\n subNo = subarrayNo\n if subarray == SCI2: subNo = 2\n toleranceMpName = \"Control.Subarray%d.trackTolerance\"%subNo\n tolerance = queryDouble(toleranceMpName, 24) # 24 retries (12 seconds)\n trackThreshold(tolerance, ants, subarray=subarray)", "def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value" ]
[ "0.5966529", "0.55709445", "0.55309284", "0.5446378", "0.5345884", "0.5345884", "0.5295788", "0.52417874", "0.5239064", "0.52170867", "0.52112365", "0.520788", "0.5186509", "0.51748765", "0.51727885", "0.5166071", "0.510891", "0.50601727", "0.5058307", "0.50549585", "0.50475556", "0.5040859", "0.5023763", "0.5018043", "0.49946138", "0.4983832", "0.4983832", "0.4980431", "0.49724668", "0.4964485", "0.49499208", "0.49489352", "0.49432415", "0.49391678", "0.49350682", "0.49234664", "0.49213412", "0.49031848", "0.49029294", "0.48980796", "0.48931912", "0.4890455", "0.4882905", "0.48784843", "0.48776302", "0.4876679", "0.4872908", "0.4870488", "0.4866956", "0.4861221", "0.48564845", "0.48560917", "0.4856047", "0.48520112", "0.48478577", "0.4838042", "0.48366356", "0.48313698", "0.48297757", "0.48212796", "0.48130813", "0.48085675", "0.4806926", "0.47959298", "0.47950804", "0.4790001", "0.4782079", "0.4777511", "0.47758391", "0.47731954", "0.47712365", "0.47661027", "0.47633204", "0.47606152", "0.47549123", "0.4751891", "0.4751265", "0.47330964", "0.47310746", "0.4730465", "0.4729017", "0.47224006", "0.4722222", "0.47185197", "0.47151747", "0.47092763", "0.47075325", "0.4705407", "0.47034755", "0.47027105", "0.47026372", "0.4700761", "0.46997932", "0.46909195", "0.46861216", "0.4686", "0.46828827", "0.46813586", "0.46809098", "0.46795633" ]
0.59055877
1
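The positive document a few fields up (`set_nms`) only stores the suppression thresholds on a GluonCV-style YOLO detector; several of this row's negatives implement the suppression step itself. For context, a minimal NumPy sketch of the greedy, class-agnostic NMS those parameters control — the defaults (`nms_thresh=0.45`, `nms_topk=400`, `post_nms=100`) are taken from the signature above, while the function body is purely illustrative and not GluonCV's internal implementation:

```python
import numpy as np

def greedy_nms(boxes, scores, iou_thresh=0.45, topk=400, post_nms=100):
    """Greedy class-agnostic NMS over corner-format boxes (x1, y1, x2, y2)."""
    # Keep only the top-k highest-scoring candidates before suppression.
    order = np.argsort(-scores)[:topk]
    boxes, scores = boxes[order], scores[order]

    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)

    keep = []                      # indices into the score-sorted subset
    idx = np.arange(len(scores))
    while idx.size > 0 and len(keep) < post_nms:
        i = idx[0]                 # highest-scoring remaining box
        keep.append(i)
        # IoU of the kept box against every other remaining box.
        xx1 = np.maximum(x1[i], x1[idx[1:]])
        yy1 = np.maximum(y1[i], y1[idx[1:]])
        xx2 = np.minimum(x2[i], x2[idx[1:]])
        yy2 = np.minimum(y2[i], y2[idx[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[idx[1:]] - inter + 1e-12)
        # Drop every remaining box that overlaps the kept one too much.
        idx = idx[1:][iou <= iou_thresh]
    return order[keep]             # map back to original box indices
```

Lowering `iou_thresh` suppresses more aggressively (fewer overlapping boxes survive), while `post_nms` simply caps how many detections are returned after suppression.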
Reset class categories and class predictors.
def reset_class(self, classes): self._clear_cached_op() self._classes = classes if self._pos_iou_thresh >= 1: self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh) for outputs in self.yolo_outputs: outputs.reset_class(classes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None", "def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None", "def _reset():\n global g_list_of_classifier\n global g_state\n\n g_state = False\n g_list_of_classifier = disco_classifiers([])", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]", "def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None", "def _untrain(self):\n if self.__clf:\n self.__clf._untrain()", "def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()", "def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None", "def reset(self):\n self.pred = None\n self.target = None", "def reset(self):\n self.pred = None\n self.target = None", "def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0", "def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()", "def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)", "def reset(self):\n self._coco_gt = COCO()\n # Create an empty detection array with 7 columns:\n # (image_id, xmin, ymin, width, height, score, class)\n self._detections = np.empty(shape=(0, 7))\n self._images = set()", "def finalize_class_set(self) -> None:\n logger.info(\"We have {} distinct classes, let's cluster it!\", len(self.classes))\n\n logger.debug(\"Created a cluster instance {} and this will cluster {} samples\", self.cluster, self.classes)\n try:\n assigned_clusters = self.cluster.cluster(vectors=[self.convert_str_list_to_vector(c) for c in self.classes],\n assign_clusters=True, trace=not execute_on_ssh_compute)\n except Exception:\n logger.exception(\"Failed to cluster the actual class set ({} samples)\", len(self.classes))\n return\n\n self.classes_to_one_hot_encode_dict.clear()\n for i in range(len(self.classes)):\n self.classes_to_one_hot_encode_dict[self.classes[i]] = assigned_clusters[i]", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def delete_classification_head(self) 
-> None:\n del self.model.classifier", "def reset_train_results(self):\n self.train_loss_results = {}\n self.train_accuracy_results = {}\n self.train_pred_results = {}", "def reset(self) -> None:\n self.precision.reset()\n self.recall.reset()", "def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()", "def _untrain(self):\n # untrain the mapper\n if self.__mapper is not None:\n self.__mapper.untrain()\n # let base class untrain as well\n super(MappedClassifier, self)._untrain()", "def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()", "def _reset(self):\n [delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]", "def test_reset_training_set():\n classifier = classifier_module.Classifier(None)\n classifier.reset_training_set(117, \"a\")\n assert classifier.training_set == []\n assert classifier.training_size == 0\n assert classifier.ultimate_training_size == 117", "def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None", "def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []", "def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None", "def reset(self):\n\n self.rotation = 0\n self.iteration = 0\n self.predictions = []\n self.prediction = 0\n self.current_position = 0\n self.rotation_list = [0]\n self.prediction = 0\n self.initial_adjust = False", "def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_", "def _post_transform(self):\n # Reclassify strategy post __init__, if needed.\n for (reclassifier, args, kwargs) in self._reclassifiers:\n self.classifier = reclassifier(self.classifier, *args, **kwargs)", "def reset(self):\n # Clear mutable data, but leave the immutables intact\n self.train_data = {}\n self.val_data = {}\n self.test_data = {}\n self.model_files = []\n self.custom_data = {}\n # Remove all the physical assets\n for item in os.scandir(self.root_path):\n os.remove(item.path)\n # Reserialize\n self.serialize()", "def reset_state(self):\n self.intersection_per_class.assign(\n tf.zeros_like(self.intersection_per_class)\n )\n self.union_per_class.assign(tf.zeros_like(self.union_per_class))", "def reset(self):\n self.dynamic_predictions = {}\n self.position = 0\n self.references = []", "def reset_states(self):\n self.model.reset_states()", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset(self):\n self.mode = 0\n self.graphs = [[], [], []]\n self.coefficients = []\n self.sample = []", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.mean_\n del self.var_", "def reset(self, custom_dataset=None):\n self.dataset = (\n self.test_dataset if custom_dataset is None\n else custom_dataset\n )\n self._set_dataset_classes(self.dataset)", "def reset(self, dataset):\n assert dataset, 'Groundtruth should not be empty.'\n assert isinstance(dataset,\n dict), 'annotation file format {} not supported'.format(\n type(dataset))\n self.anns, self.cats, self.imgs = dict(), dict(), dict()\n self.dataset = 
copy.deepcopy(dataset)\n self.createIndex()", "def reset_attributes(self):\n\n self.ell = None\n self.ell_jacobian = None\n self.ell_hessian = None\n\n self.ell_hyperparam = None\n self.ell_jacobian_hyperparam = None\n self.ell_hessian_hyperparam = None\n\n self.Y = None\n self.Cinv = None\n self.C = None\n self.Mz = None\n self.MMz = None\n self.sigma2 = None\n self.sigma02 = None\n self.Kninv = None\n self.KnpKninv = None\n\n self.Y_C_Mz_hyperparam = None\n self.sigma_hyperparam = None\n self.MMz_hyperparam = None\n self.Kninv_KnpKninv_hyperparam = None", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()", "def reset(self):\n\t\tself.pos = self.start\n\n\t\tself.weighted_n_left = 0.0\n\t\tself.weighted_n_right = self.weighted_n_node_samples\n\n\t\tself.label_count_left \t= np.zeros(self.n_classes)\n\t\tself.label_count_right \t= np.copy(self.label_count_total)", "def reset_training_data(self):\n logger.info(\"resetting training data\")\n if self.shuffle:\n random.shuffle(self.tweets)\n self.batch_generator = self.get_batch()", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset(self):\n\n def reset_function(module):\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n m.reset_parameters()\n\n self.apply(reset_function)", "def reset(self):\n self._setupObjects()", "def reset_metrics(self):\n self.metrics['loss'] = 0.0\n self.metrics['num_tokens'] = 0\n self.metrics['correct_tokens'] = 0\n self.metrics['correct_pred'] = 0\n self.metrics['pred_count'] = 0", "def reset(self):\n self.score = None\n self.true = None\n self.meta = None", "def reset(self):\n self.dims.clear()\n self.xlabels.clear()\n self.annotators.clear()\n self._figTitle = None\n self.tbmTitle = None\n self._isSubplot = False\n self._universal_xlabel = False\n self._plotter = None\n self.Nsp = 0", "def reset_state(self):\n for name in self.metrics:\n self.metrics[name].reset_state()", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()\n self.reset_snippet()", "def reset(self):\n self.satisfiability = Satisfiability.UNTESTED\n self.model = None\n self.unsatCore = []", "def reset(cls):\n cls._options = None\n cls._scoped_instances = {}", "def reset(self):\n self.best_model = None\n self.best_res = -1", "def reset(self):\n self.__sets = []\n self._computed = False", "def reset(self):\n for layer in self.network:\n layer.clean()", "def reset(self):\n self.current_exposure = None\n self.scores = {}", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def _reset(self):\n\n # Checking one attribute is enough, becase they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_", "def test_reset_predictions():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"MNB\")\n print(atom.mnb.score_test)\n atom.mnb.reset_predictions()\n assert atom.mnb._pred_attrs[9] is None", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def reset_model(self):\n raise NotImplementedError", "def clear_all(cls):\n del cls.text_labels[:]", "def 
reset(self):\n self._weights.clear()", "def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key", "def reset(self):\n for provider in self.providers.values():\n provider.reset()\n\n for observation in self.observations.values():\n observation.reset()", "def reset_scorer(self):\n logger.info('Resetting the scorer')\n self.scorer = get_eidos_bayesian_scorer()\n for corpus_id, corpus in self.corpora.items():\n corpus.curations = {}", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def tearDownClass(cls):\n os.removedirs(cls.test_dir)\n del cls.checkpoint\n del cls.dataset\n del cls.experiment\n del cls.test_dir\n del cls.tokenizer_parameters\n gc.collect()", "def reset(cls):\r\n cls._ROOTS_BY_TYPE = {}\r\n cls._TYPES_BY_ROOT = {}\r\n cls._SEARCHED = set()", "def _clear_state(self):\n if hasattr(self, 'estimators_'):\n self.estimators_ = np.empty((0, 0), dtype=np.object)\n if hasattr(self, 'train_score_'):\n del self.train_score_\n if hasattr(self, 'oob_improvement_'):\n del self.oob_improvement_\n if hasattr(self, 'init_'):\n del self.init_", "def reset(self):\n self._clusters = {}\n self._clusters_val = {}\n self._centroids = {}\n self.store()", "def reset(self):\n\t\tself.graph = OrderedDict()\n\t\tself.bottoms = OrderedDict()\n\t\tself.output_shape = OrderedDict()\n\t\tself.cur_tensor = None\n\t\tself.cur_id = None\n\t\tself.tmp_list = []\n\t\tself.log_init()", "def reset(self):\n weight = self.module.weight.data\n self.sensitivity_in = torch.zeros(weight.shape[1]).to(weight.device)\n self._features = torch.Tensor()\n self._current_batch = 1", "def finalize(self):\n self.classifier.finalize()", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def reset(self):\n for i in range(0, len(self.current_state)):\n self.current_state[i] = 0\n\n for i in range(0, len(self.weights)):\n self.weights[i] = 0", "def reset_transform(self):\n self._impl.reset_transform()", "def reset(self):\n self.observation = None\n self.history.clear()\n for i in range(len(self.answers)):\n self.answers[i] = None\n self.reset_metrics()", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.target_ids = self.target_ids[inv_perm]\n self.new_epoch()", "def shutdown_training(self):\n\n self._train_data_set = None\n self._test_data_set = None", "def predict_category(self):\n pass", "def reset(self) -> None:\n self.best = self.mode_worse\n 
self.cooldown_counter = 0\n self.num_bad_epochs = 0", "def reset(self, model):\n self.reset_strategy(model)", "def reset(self):\n self.c_count = 0\n self.a_count = -1\n self.epsilon = self.init_epsilon", "def reset(self):\n self.total_pulls = 0\n self.total_score = 0\n self.npulls = np.zeros(self.k)\n self.score = np.zeros(self.k)", "def reset_all(self) -> None:\n for metric in self:\n metric.reset()", "def reset(self):\n self.clean_cache_upstream()\n self.set_mode_train()\n for step_obj in self.all_upstream_steps.values():\n step_obj.is_fittable = DEFAULT_TRAINING_SETUP['is_fittable']\n step_obj.force_fitting = DEFAULT_TRAINING_SETUP['force_fitting']\n step_obj.persist_output = DEFAULT_TRAINING_SETUP['persist_output']\n step_obj.cache_output = DEFAULT_TRAINING_SETUP['cache_output']\n step_obj.load_persisted_output = DEFAULT_TRAINING_SETUP['load_persisted_output']\n logger.info('Step {}, reset all upstream Steps to default training parameters, '\n 'including this Step'.format(self.name))\n return self", "def reset(self):\n self.loss = []\n self.funcargs = []\n self.nSteps = 0 \n self.converged = False", "def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()", "def reset(self, runs):\n\n self.answer_wrong = 0\n self.answer_right = 0\n self.train_new(runs)", "def reset(self):\n inv_perm = np.argsort(self._current_order)\n self._current_order = self._current_order[inv_perm]\n self.inputs = self.inputs[inv_perm]\n self.targets = self.targets[inv_perm]\n self.new_epoch()", "def reset(self):\n self.data = {}\n self.pf.reset()\n\n self.tc.reset()\n # Reset the neuron grid\n (self.n_n, XE, YE, IE, _, _) = self.init_pix_rf_centers(\n self.l_n, self.l_i, self.ds, self.de, mode=self.neuron_layout,\n drop_prob=self.drop_prob\n )\n self.tc.t_XE.set_value(XE)\n self.tc.t_YE.set_value(YE)\n self.tc.t_IE.set_value(IE)\n self.pf = self.init_particle_filter(self.motion_prior, self.n_p)", "def reset(self):\n self.data = self._defaults", "def reset_step(self):\n # reset all levels\n for l in self.levels:\n l.reset_level()", "def full_clear(self):\n self.clear()\n self.class_hooks.clear()", "def reset(self):\n self.reset_traits(('grow_hair', 'n_scaling_params', 'scale_x',\n 'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',\n 'trans_x', 'trans_y', 'trans_z'))", "def reset_parameters(self) -> None:\n \n self.classifier.apply(xavier)\n if len(self.old_cols) > 0:\n self.adaptor1.apply(xavier)\n self.adaptor2.apply(xavier)" ]
[ "0.72172534", "0.6899433", "0.6748963", "0.6726632", "0.66773576", "0.6629043", "0.6529936", "0.6484326", "0.6478405", "0.6478405", "0.6471619", "0.6466328", "0.6457058", "0.62709737", "0.6202787", "0.61770207", "0.6149169", "0.612528", "0.60959744", "0.6091237", "0.6087531", "0.60872006", "0.6081047", "0.6066839", "0.60223013", "0.6012965", "0.6002564", "0.59998363", "0.59868664", "0.5971098", "0.5958256", "0.5952775", "0.594279", "0.5918428", "0.59096086", "0.5900852", "0.58881825", "0.5887112", "0.5883134", "0.58729875", "0.5870646", "0.58677757", "0.58662874", "0.5846744", "0.5833687", "0.57987237", "0.5793188", "0.5791179", "0.5765317", "0.5741407", "0.57341814", "0.5720904", "0.5706785", "0.5705799", "0.57011515", "0.5695678", "0.569285", "0.5689148", "0.5689148", "0.56810266", "0.5667778", "0.5667778", "0.5667778", "0.56664187", "0.5660187", "0.56423366", "0.56423163", "0.56394273", "0.5638186", "0.5629467", "0.56273055", "0.5625295", "0.5611326", "0.56112677", "0.56112003", "0.5595357", "0.5577755", "0.5574758", "0.556521", "0.5562065", "0.5554791", "0.5549733", "0.55386674", "0.55382806", "0.55375427", "0.55356026", "0.5535315", "0.5530717", "0.5524059", "0.55220574", "0.55189806", "0.5518207", "0.55178887", "0.55174625", "0.55083424", "0.5503669", "0.5503157", "0.54903287", "0.54900455", "0.54896235" ]
0.70744306
1
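The rows on either side of this score document the same GluonCV YOLOv3 API: `reset_class` above re-targets the detector's prediction heads, and the factory in the next row builds the VOC-pretrained darknet53 variant (one of that row's negatives, `model_fn`, loads exactly this model from the zoo). A hedged end-to-end sketch — the image filename is a placeholder and `pretrained=True` assumes the usual weight download:

```python
import mxnet as mx
import gluoncv as gcv

# VOC-pretrained YOLOv3 with a darknet53 backbone from the model zoo.
net = gcv.model_zoo.get_model('yolo3_darknet53_voc', pretrained=True, ctx=mx.cpu())

# Post-processing knobs from the set_nms row earlier in this listing.
net.set_nms(nms_thresh=0.45, nms_topk=400, post_nms=100)

# Standard YOLO test-time transform, then a single forward pass.
x, img = gcv.data.transforms.presets.yolo.load_test('street.jpg', short=416)
class_ids, scores, bboxes = net(x)
```

`reset_class` would be called on the same object (e.g. `net.reset_class(['person', 'car'])`) to point the output heads at a new label set, typically before fine-tuning rather than straight inference, since the rebuilt heads start from fresh parameters.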
YOLO3 multi-scale with darknet53 base network on VOC dataset.
def yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs): from ...data import VOCDetection pretrained_base = False if pretrained else pretrained_base base_net = darknet53( pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices, **kwargs) stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]] anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]] strides = [8, 16, 32] classes = VOCDetection.CLASSES return get_yolov3( 'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc', pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)", "def get_pytorch_yolo(get_default_cifar10_subset):\n import cv2\n import torch\n\n from pytorchyolo import models\n from pytorchyolo.utils.loss import compute_loss\n\n from art.estimators.object_detection.pytorch_yolo import PyTorchYolo\n\n model_path = \"/tmp/PyTorch-YOLOv3/config/yolov3.cfg\"\n weights_path = \"/tmp/PyTorch-YOLOv3/weights/yolov3.weights\"\n model = models.load_model(model_path=model_path, weights_path=weights_path)\n\n class YoloV3(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, x, targets=None):\n if self.training:\n outputs = self.model(x)\n # loss is averaged over a batch. Thus, for patch generation use batch_size = 1\n loss, loss_components = compute_loss(outputs, targets, self.model)\n\n loss_components_dict = {\"loss_total\": loss}\n\n return loss_components_dict\n else:\n return self.model(x)\n\n model = YoloV3(model)\n\n object_detector = PyTorchYolo(\n model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=(\"loss_total\",)\n )\n\n n_test = 10\n (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset\n x_test_cifar10 = x_test_cifar10[0:n_test]\n\n x_test = cv2.resize(\n x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC\n ).transpose((2, 0, 1))\n x_test = np.expand_dims(x_test, axis=0)\n x_test = np.repeat(x_test, repeats=2, axis=0)\n\n # Create labels\n\n result = object_detector.predict(x=x_test)\n\n y_test = [\n {\n \"boxes\": result[0][\"boxes\"],\n \"labels\": result[0][\"labels\"],\n \"scores\": np.ones_like(result[0][\"labels\"]),\n },\n {\n \"boxes\": result[1][\"boxes\"],\n \"labels\": result[1][\"labels\"],\n \"scores\": np.ones_like(result[1][\"labels\"]),\n },\n ]\n\n yield object_detector, x_test, y_test", "def model_fn(model_dir):\n ctx = mx.cpu()\n net = gcv.model_zoo.get_model(\n 'yolo3_darknet53_voc',\n pretrained=False,\n ctx=ctx)\n batchify = gcv.data.batchify._stack_arrs\n net.load_parameters(os.path.join(model_dir, 'yolo3_darknet53_voc.params'), mx.cpu(0))\n net.hybridize()\n def image_transform(im_bytes):\n \"\"\"\n Apply image transformation to raw byte images\n \"\"\"\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]\n\n return net, image_transform, batchify", "def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = 
get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n np.save('adv_example.npy', adv_example)", "def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for 
r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]", "def VLocNet_v3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n # odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n # odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_fc_3 = Dense(4, name='odo_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_2_1, activation='elu', branch='_geo')\n\n # The Previous Pose back-feeding\n\n input_previous_pose = 
Input(shape=(7, ), name='input_previous_pose')\n\n previous_fc_4 = Dense(802816, name='previous_fc_4')(input_previous_pose)\n\n res_previous = Reshape((28, 28, 1024), name='res_previous')(previous_fc_4)\n\n # Concatenation the previous pose back to the residual unit\n con_4 = concatenate([pose_4, res_previous], name='previous_and_geo4_merge')\n\n pose_5 = ResNet_50_unit_5(input_tensor=con_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_fc_3 = Dense(4, name='pose_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1, input_previous_pose], output=[odo_merge, pose_merge],\n name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). 
'\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def yolo_v3(inputs, num_classes, is_training=False, data_format='NCHW', reuse=False, with_spp=False):\n # it will be needed later on\n img_size = inputs.get_shape().as_list()[1:3]\n\n # transpose the inputs to NCHW\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n # normalize values to range [0..1]\n inputs = inputs / 255\n\n # set batch norm params\n batch_norm_params = {\n 'decay': _BATCH_NORM_DECAY,\n 'epsilon': _BATCH_NORM_EPSILON,\n 'scale': True,\n 'is_training': is_training,\n 'fused': None, # Use fused batch norm if possible.\n }\n\n # Set activation_fn and parameters for conv2d, batch_norm.\n with slim.arg_scope([slim.conv2d, slim.batch_norm, _fixed_padding], data_format=data_format, reuse=reuse):\n with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n biases_initializer=None,\n activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=_LEAKY_RELU)):\n with tf.variable_scope('darknet-53'):\n route_1, route_2, inputs = darknet53(inputs)\n\n with tf.variable_scope('yolo-v3'):\n route, inputs = _yolo_block(inputs, 512, data_format, with_spp)\n\n detect_1 = _detection_layer(\n inputs, num_classes, _ANCHORS[6:9], img_size, data_format)\n detect_1 = tf.identity(detect_1, name='detect_1')\n\n inputs = _conv2d_fixed_padding(route, 256, 1)\n upsample_size = route_2.get_shape().as_list()\n inputs = _upsample(inputs, upsample_size, data_format)\n inputs = tf.concat([inputs, route_2],\n axis=1 if data_format == 'NCHW' else 3)\n\n route, inputs = _yolo_block(inputs, 256)\n\n detect_2 = _detection_layer(\n inputs, num_classes, _ANCHORS[3:6], img_size, data_format)\n detect_2 = tf.identity(detect_2, name='detect_2')\n\n inputs = _conv2d_fixed_padding(route, 128, 1)\n upsample_size = route_1.get_shape().as_list()\n inputs = _upsample(inputs, upsample_size, data_format)\n inputs = tf.concat([inputs, route_1],\n axis=1 if data_format == 'NCHW' else 3)\n\n _, inputs = _yolo_block(inputs, 128)\n\n detect_3 = _detection_layer(\n inputs, num_classes, _ANCHORS[0:3], img_size, data_format)\n detect_3 = tf.identity(detect_3, name='detect_3')\n\n detections = tf.concat([detect_1, detect_2, 
detect_3], axis=1)\n detections = tf.identity(detections, name='detections')\n return detections", "def yolo3_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):\r\n mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)\r\n print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)\r\n # expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)\r\n\r\n # activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)\r\n # expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)\r\n\r\n # activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)\r\n # expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)\r\n\r\n # NOTE: activation layer name may different for TF1.x/2.x, so we\r\n # use index to fetch layer\r\n # f1: 13 x 13 x (960*alpha)\r\n f1 = mobilenetv3large.layers[194].output\r\n # f2: 26 x 26 x (672*alpha)\r\n f2 = mobilenetv3large.layers[146].output\r\n # f3: 52 x 52 x (240*alpha)\r\n f3 = mobilenetv3large.layers[79].output\r\n\r\n f1_channel_num = int(960*alpha)\r\n f2_channel_num = int(672*alpha)\r\n f3_channel_num = int(240*alpha)\r\n #f1_channel_num = 1024\r\n #f2_channel_num = 512\r\n #f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])", "def darknet53():\r\n\r\n darknet = DarkNet(\r\n block=ResidualBlock,\r\n layer_nums=[1, 2, 8, 8, 4],\r\n in_channels=[32, 64, 128, 256, 512],\r\n out_channels=[64, 128, 256, 512, 1024],\r\n )\r\n\r\n return darknet", "def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = 
tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data", "def VLocNet_v2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_fc_3 = Dense(4, name='odo_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_geo')\n\n # The Previous Pose back-feeding\n\n input_previous_pose = Input(shape=(7, ), name='input_previous_pose')\n\n previous_fc_4 = Dense(200704, name='previous_fc_4')(input_previous_pose)\n\n res_previous = Reshape((14, 14, 1024), name='res_previous')(previous_fc_4)\n\n # Concatenation 
the previous pose back to the residual unit\n con_4 = concatenate([pose_4, res_previous], name='previous_and_geo4_merge')\n\n pose_5 = ResNet_50_unit_5(input_tensor=con_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_fc_3 = Dense(4, name='pose_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1, input_previous_pose], output=[odo_merge, pose_merge],\n name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def main(\n image = None ,\n gpu = -1,\n weights_path= f\"{ Path(__file__).parent }/weights/yolov3.weights\",\n background = False\n):\n print( weights_path )\n my_path = Path( __file__ ).parent\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=int, default= gpu )\n parser.add_argument('--cfg', type=str, default=my_path/'config/yolov3_default.cfg')\n parser.add_argument('--ckpt', type=str,\n help='path to the checkpoint file')\n parser.add_argument('--weights_path', type=str,\n default= weights_path, help='path to weights file')\n parser.add_argument('--image', type=str , default= image )\n parser.add_argument('--background', type=bool,\n default= background , help='background(no-display mode. 
save \"./output.png\")')\n parser.add_argument('--detect_thresh', type=float,\n default= 0.5 , help='confidence threshold')\n args = parser.parse_args()\n\n with open(args.cfg, 'r') as f:\n cfg = yaml.load(f)\n\n imgsize = cfg['TEST']['IMGSIZE']\n model = YOLOv3(cfg['MODEL'])\n\n confthre = cfg['TEST']['CONFTHRE'] \n nmsthre = cfg['TEST']['NMSTHRE']\n\n if args.detect_thresh:\n confthre = args.detect_thresh\n\n\n\n img = imread( args.image )\n if img is None :\n print( \"load image failed\" )\n print( args.image )\n return\n\n img_raw = img.copy()[:, :, ::-1].transpose((2, 0, 1))\n img, info_img = preprocess(img, imgsize, jitter=0) # info = (h, w, nh, nw, dx, dy)\n img = np.transpose(img / 255., (2, 0, 1))\n img = torch.from_numpy(img).float().unsqueeze(0)\n\n if args.gpu >= 0:\n model.cuda(args.gpu)\n img = Variable(img.type(torch.cuda.FloatTensor))\n else:\n img = Variable(img.type(torch.FloatTensor))\n\n assert args.weights_path or args.ckpt, 'One of --weights_path and --ckpt must be specified'\n\n if args.weights_path:\n print(\"loading yolo weights %s\" % (args.weights_path))\n parse_yolo_weights(model, args.weights_path)\n elif args.ckpt:\n print(\"loading checkpoint %s\" % (args.ckpt))\n state = torch.load(args.ckpt)\n if 'model_state_dict' in state.keys():\n model.load_state_dict(state['model_state_dict'])\n else:\n model.load_state_dict(state)\n\n model.eval()\n\n\n with torch.no_grad():\n outputs1 = model(img)\n # np.save(\"output.npy\" , outputs.numpy() )\n # torch.save( outputs1 , \"outputs1.pt\" )\n out1 = torch.load( \"outputs1.pt\" )\n rere = torch.equal( outputs1 , out1 )\n outputs = postprocess(outputs1, 80, confthre, nmsthre)\n\n a = \"hoho\"\n\n\n if outputs[0] is None:\n print(\"No Objects Deteted!!\")\n return\n\n coco_class_names, coco_class_ids, coco_class_colors = get_coco_label_names()\n\n bboxes = list()\n classes = list()\n colors = list()\n\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in outputs[0]:\n\n cls_id = coco_class_ids[int(cls_pred)]\n print(int(x1), int(y1), int(x2), int(y2), float(conf), int(cls_pred))\n print('\\t+ Label: %s, Conf: %.5f' %\n (coco_class_names[cls_id], cls_conf.item()))\n box = yolobox2label([y1, x1, y2, x2], info_img)\n bboxes.append(box)\n classes.append(cls_id)\n colors.append(coco_class_colors[int(cls_pred)])\n\n # args.background = True\n\n if args.background:\n import matplotlib\n matplotlib.use('Agg')\n\n from utils.vis_bbox import vis_bbox\n\n vis_bbox(\n img_raw, bboxes, label=classes, label_names=coco_class_names,\n instance_colors=colors, linewidth=2)\n\n\n if args.background:\n output = Path( \"./output\" )\n output.mkdir( parents=True , exist_ok=True )\n now = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n output /= f\"output-{now}.png\"\n plt.savefig( output )\n\n return str( output.absolute() )\n # return plt_to_qpixmap(plt.gca())\n else :\n plt.show()", "def build_model_mobilenet(num_classes):", "def yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):\r\n mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)\r\n print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)\r\n # expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)\r\n\r\n # activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)\r\n # expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)\r\n\r\n # 
activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)\r\n # expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)\r\n\r\n # NOTE: activation layer name may different for TF1.x/2.x, so we\r\n # use index to fetch layer\r\n # f1: 13 x 13 x (960*alpha)\r\n f1 = mobilenetv3large.layers[194].output\r\n # f2: 26 x 26 x (672*alpha) for 416 input\r\n f2 = mobilenetv3large.layers[146].output\r\n # f3: 52 x 52 x (240*alpha) for 416 input\r\n f3 = mobilenetv3large.layers[79].output\r\n\r\n f1_channel_num = int(960*alpha)\r\n f2_channel_num = int(672*alpha)\r\n f3_channel_num = int(240*alpha)\r\n #f1_channel_num = 1024\r\n #f2_channel_num = 512\r\n #f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo4_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1, y2, y3])", "def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False", "def multiclass():\n # load\n print(\"Loading data...\")\n train_y_cpu, train_x_cpu = dataio.bin_to_tensors(constants.TRAIN_ONEHOT, 10)\n val_y_cpu, val_x_cpu = dataio.bin_to_tensors(constants.VAL_ONEHOT, 10)\n\n print(\"Moving data to GPU...\")\n train_y = train_y_cpu.type(IntTT)\n train_x = train_x_cpu.type(FloatTT)\n val_y = val_y_cpu.type(IntTT)\n val_x = val_x_cpu.type(FloatTT)\n\n print(\"Starting experiments...\")\n dummy = 0.0\n\n # OLS analytic solution. 
uses CPU tensors to go to/from numpy for\n # pseudoinverse.\n w = ols_analytic(train_x_cpu, train_y_cpu)\n report(\n \"[multiclass] OLS analytic (train)\",\n w,\n train_x,\n train_y,\n dummy,\n multiclass_eval,\n ols_loss,\n )\n report(\n \"[multiclass] OLS analytic (val)\",\n w,\n val_x,\n val_y,\n dummy,\n multiclass_eval,\n ols_loss,\n )\n\n # # OLS gradient descent\n # ols_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3500, 'report_interval': 500}\n # w = gradient_descent(train_x, train_y, -1, ols_loss, ols_gradient, ols_gd_settings)\n # report('[multiclass] OLS GD (train)', w, train_x, train_y, dummy, multiclass_eval, ols_loss)\n # report('[multiclass] OLS GD (val)', w, val_x, val_y, dummy, multiclass_eval, ols_loss)\n\n # # OLS coordinate descent\n # w = coordinate_descent(train_x, train_y, dummy, ols_cd_weight_update, ols_loss, {'epochs': 150, 'report_interval': 10})\n # report('[multiclass] OLS CD (train)', w, train_x, train_y, dummy, multiclass_eval, ols_loss)\n # report('[multiclass] OLS CD (val)', w, val_x, val_y, dummy, multiclass_eval, ols_loss)\n\n # ridge analytic solution\n for lmb in [0.2]:\n w = ridge_analytic(train_x, train_y, lmb)\n report(\n \"[multiclass] Ridge analytic (train) lambda={}\".format(lmb),\n w,\n train_x,\n train_y,\n lmb,\n multiclass_eval,\n ridge_loss,\n )\n report(\n \"[multiclass] Ridge analytic (val) lambda={}\".format(lmb),\n w,\n val_x,\n val_y,\n lmb,\n multiclass_eval,\n ridge_loss,\n )\n\n # ridge gradient descent\n # ridge_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3500, 'report_interval': 500}\n # for lmb in [0.2]:\n # w = gradient_descent(train_x, train_y, lmb, ridge_loss, ridge_gradient, ridge_gd_settings)\n # report('[multiclass] Ridge GD (train)', w, train_x, train_y, lmb, multiclass_eval, ridge_loss)\n # report('[multiclass] Ridge GD (val)', w, val_x, val_y, lmb, multiclass_eval, ridge_loss)\n\n # # ridge coordinate descent\n # ridge_cd_settings: CDSettings = {'epochs': 150, 'report_interval': 10}\n # for lmb in [0.2]:\n # w = coordinate_descent(train_x, train_y, lmb, ridge_cd_weight_update, ridge_loss, ridge_cd_settings)\n # report('[multiclass] Ridge CD (train)', w, train_x, train_y, lmb, multiclass_eval, ridge_loss)\n # report('[multiclass] Ridge CD (val)', w, val_x, val_y, lmb, multiclass_eval, ridge_loss)\n\n # # lasso GD\n # lasso_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3000, 'report_interval': 500}\n # for lmb in [0.2]:\n # w = gradient_descent(train_x, train_y, lmb, lasso_loss, lasso_gradient, lasso_gd_settings)\n # report('[multiclass] Lasso GD (train) lambda={}'.format(lmb), w, train_x, train_y, lmb, multiclass_eval, lasso_loss)\n # report('[multiclass] Lasso GD (val) lambda={}'.format(lmb), w, val_x, val_y, lmb, multiclass_eval, lasso_loss)\n\n # lasso CD\n lasso_cd_settings: CDSettings = {\"epochs\": 150, \"report_interval\": 10}\n for lmb in [0.01]:\n w, record = coordinate_descent(\n train_x, train_y, lmb, lasso_cd_weight_update, lasso_loss, lasso_cd_settings\n )\n report(\n \"[multiclass] Lasso CD (train) lambda={}\".format(lmb),\n w,\n train_x,\n train_y,\n lmb,\n multiclass_eval,\n lasso_loss,\n )\n report(\n \"[multiclass] Lasso CD (val) lambda={}\".format(lmb),\n w,\n val_x,\n val_y,\n lmb,\n multiclass_eval,\n lasso_loss,\n )", "def test_confidence_thresholding_2thresholds_3d_vis_api(csv_filename):\n input_features = [\n text_feature(encoder={\"vocab_size\": 10, \"min_len\": 1, \"type\": \"stacked_cnn\"}),\n number_feature(),\n category_feature(encoder={\"vocab_size\": 10, \"embedding_size\": 5}),\n 
set_feature(),\n sequence_feature(encoder={\"vocab_size\": 10, \"max_len\": 10, \"type\": \"embed\"}),\n ]\n output_features = [\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n ]\n encoder = \"parallel_cnn\"\n with TemporaryDirectory() as tmpvizdir:\n # Generate test data\n data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))\n input_features[0][ENCODER][TYPE] = encoder\n model = run_api_experiment(input_features, output_features)\n test_df, train_df, val_df = obtain_df_splits(data_csv)\n _, _, output_dir = model.train(\n training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpvizdir, \"results\")\n )\n test_stats, predictions, _ = model.evaluate(\n dataset=test_df, collect_predictions=True, output_directory=output_dir\n )\n\n output_feature_name1 = output_features[0][\"name\"]\n output_feature_name2 = output_features[1][\"name\"]\n\n ground_truth_metadata = model.training_set_metadata\n feature1_cols = [\n f\"{output_feature_name1}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name1][\"idx2str\"]\n ]\n feature2_cols = [\n f\"{output_feature_name2}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name2][\"idx2str\"]\n ]\n\n # probabilities need to be list of lists containing each row data from the\n # probability columns ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate\n probability1 = predictions.loc[:, feature1_cols].values\n probability2 = predictions.loc[:, feature2_cols].values\n\n target_predictions1 = test_df[output_feature_name1]\n target_predictions2 = test_df[output_feature_name2]\n ground_truth1 = np.asarray(\n [ground_truth_metadata[output_feature_name1][\"str2idx\"][prediction] for prediction in target_predictions1]\n )\n ground_truth2 = np.asarray(\n [ground_truth_metadata[output_feature_name2][\"str2idx\"][prediction] for prediction in target_predictions2]\n )\n viz_outputs = (\"pdf\", \"png\")\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(output_dir, f\"*.{viz_output}\")\n visualize.confidence_thresholding_2thresholds_3d(\n [probability1, probability2],\n [ground_truth1, ground_truth2],\n model.training_set_metadata,\n [output_feature_name1, output_feature_name2],\n labels_limit=0,\n output_directory=output_dir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 1 == len(figure_cnt)", "def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = 
net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)", "def run(self):\n r = rospy.Rate(30)\n\n net = darknet.load_net(b\"/home/nvidia/darknet/cfg/yolov3-tiny.cfg\", b\"/home/nvidia/darknet/yolov3-tiny.weights\", 0)\n meta = darknet.load_meta(b\"/home/nvidia/darknet/cfg/coco.data\")\n\n # cv2.namedWindow(\"cv2_img\", cv2.WINDOW_NORMAL)\n\n while not rospy.is_shutdown():\n if self.last_img is not None:\n self.do_stuff(net,meta)\n r.sleep()", "def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non 
maximal suppression for bboxes\n        bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n        # draw bbox on latest image in orignal_images\n        image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n\n        # save image frame to video path if path to save is given\n        if output_path != '': out.write(image)\n\n        # display image frame (i.e. play video) if show is true\n        if show:\n\n            # show the image\n            cv2.imshow('output', image)\n\n            # if q key is pressed\n            if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n\n                # end session\n                cv2.destroyAllWindows()\n\n                # break out of while loop\n                break\n\n    # When everything done, release the capture\n    vid.release()\n    cv2.destroyAllWindows()", "def yolo_body(inputs, num_anchors, num_classes, architecture=\"yolov4\", base_ops=DarknetConv2D_BN_Leaky):\n    if architecture == \"yolov4\":\n        config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n        outputs = spatial_pyramid_block(cspdarknet_body(inputs), base_ops=base_ops) if config.spp else cspdarknet_body(\n            inputs)\n        body = Model(inputs, outputs)\n        features = [body.layers[131].output, body.layers[204].output, body.output] # mish_37 58\n    elif architecture == \"yolov4_efficientnetb0\":\n        config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n        backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs)\n\n        outputs = spatial_pyramid_block(\n            backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n            \"top_activation\").output\n        body = Model(inputs, outputs)\n        features = [body.get_layer(\"block4a_expand_activation\").output,\n                    body.get_layer(\"block6a_expand_activation\").output,\n                    body.output]\n    elif architecture == \"yolov4_efficientnetb1\":\n        config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n        backbone = EfficientNetB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n        outputs = spatial_pyramid_block(\n            backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n            \"top_activation\").output\n        body = Model(inputs, outputs)\n        features = [body.get_layer(\"block4a_expand_activation\").output,\n                    body.get_layer(\"block6a_expand_activation\").output,\n                    body.output]\n    elif architecture == \"yolov4\":\n        config = get_yolo_config(\"yolov4_efficientnetb0\", num_anchors, num_classes, base_ops=base_ops)\n        backbone = EfficientNetB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n        outputs = spatial_pyramid_block(\n            backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n            \"top_activation\").output\n        body = Model(inputs, outputs)\n        features = [body.get_layer(\"block4a_expand_activation\").output,\n                    body.get_layer(\"block6a_expand_activation\").output,\n                    body.output]\n    elif architecture == \"yolov4_efficientnetliteb1\":\n        config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n        backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n        outputs = spatial_pyramid_block(\n            backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n            \"top_activation\").output\n        body = Model(inputs, outputs)\n        features = [body.get_layer(\"block4a_expand_activation\").output,\n                    body.get_layer(\"block6a_expand_activation\").output,\n                    body.output]\n    elif architecture == \"yolov4_efficientnetliteb2\":\n        config = 
get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb3\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB3(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb4\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB4(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_mobilenetv2\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = MobileNetV2(include_top=False, weights=None, input_tensor=inputs)\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"out_relu\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"out_relu\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block_6_expand_relu\").output,\n body.get_layer(\"block_13_expand_relu\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb4_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB4(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb0_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb1_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB1(include_top=False, 
weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb2_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb0\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb1\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb1_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.spp = True\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb1\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_mobilenetv2_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = MobileNetV2(include_top=False, weights=None, input_tensor=inputs)\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"out_relu\").output, 
base_ops=base_ops) if config.spp else backbone.get_layer(\n \"out_relu\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block_6_expand_relu\").output,\n body.get_layer(\"block_13_expand_relu\").output,\n body.output]\n else:\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n # print(config)\n outputs = spatial_pyramid_block(darknet_body(inputs), base_ops=base_ops) if config.spp else darknet_body(inputs)\n body = Model(inputs, outputs)\n features = [body.layers[92].output, body.layers[152].output, body.output]\n pass\n # print(config.agg_method)\n if config.agg_method == \"panet\":\n new_features = pan_network(features, config)\n y1, y2, y3 = new_features[::-1]\n else:\n new_features = fpn_network(features, config)\n y1, y2, y3 = new_features[::-1]\n\n return Model(inputs, [y1, y2, y3])", "def get_mobilenet_v3(model_name:str, pretrained=True, **kwargs) -> nn.Module:\n\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,\n se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')\n\n if model_name == 'mobilenet_v3_large':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),\n mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1024\n\n model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)\n\n mobilenet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in 
enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def L14_Net112(mode=\"train\"):\n data = mx.symbol.Variable(name=\"data\")\n landmark_target = mx.symbol.Variable(name=\"landmark_target\")\n landmark_vis = mx.symbol.Variable(name=\"landmark_vis\")\n \n # data = 112X112\n # conv1 = 56X56\n conv1 = Conv(data, num_filter=res_base_dim, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=\"conv1\")\n conv2 = Residual(conv1, num_block=1, num_out= res_base_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim, name=\"res2\")\n \n\t#conv23 = 28X28\n conv23 = DResidual(conv2, num_out=res_base_dim*2, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*2, name=\"dconv23\")\n conv3 = Residual(conv23, num_block=2, num_out=res_base_dim*2, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*2, name=\"res3\")\n \n\t#conv34 = 14X14\n conv34 = DResidual(conv3, num_out=res_base_dim*4, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*4, name=\"dconv34\")\n conv4 = Residual(conv34, num_block=3, num_out=res_base_dim*4, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*4, name=\"res4\")\n \n\t#conv45 = 7X7\n conv45 = DResidual(conv4, num_out=res_base_dim*8, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*8, name=\"dconv45\")\n conv5 = Residual(conv45, num_block=2, num_out=res_base_dim*8, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*8, name=\"res5\")\n \n\t# conv6 = 1x1\n conv6 = Conv(conv5, num_filter=res_base_dim*8, kernel=(7, 7), pad=(0, 0), stride=(1, 1), name=\"conv6\")\n fc1 = Conv(conv6, num_filter=res_base_dim*16, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc1\")\n fc2 = Conv(fc1, num_filter=res_base_dim*32, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc2\")\t\n conv6_3 = mx.symbol.FullyConnected(data=fc2, num_hidden=42, name=\"conv6_3\")\t\n bn6_3 = mx.sym.BatchNorm(data=conv6_3, name='bn6_3', fix_gamma=False,momentum=0.9)\n\t\n if mode == \"test\":\n 
landmark_pred = bn6_3\n group = mx.symbol.Group([landmark_pred])\n else:\n \n out = mx.symbol.Custom(landmark_vis = landmark_vis, landmark_pred=bn6_3, landmark_target=landmark_target, \n op_type='negativemining_hand21', name=\"negative_mining\")\n group = mx.symbol.Group([out])\n \n return group", "def yolo_forward(net, LABELS, image, confidence_level, save_image=False):\n\n # initialize a list of colors to represent each possible class label\n np.random.seed(42)\n colors = np.random.randint(0, 255, size=(10000, 3),\n dtype='uint8')\n\n # grab image spatial dimensions\n (H, W) = image.shape[:2]\n\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n # also time it\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layer_outputs = net.forward(ln)\n end = time.time()\n\n # show timing information on YOLO\n print('[INFO] YOLO took {:.6f} seconds'.format(end - start))\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n class_ids = []\n\n # loop over each of the layer outputs\n for output in layer_outputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > confidence_level:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype('int')\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n # idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_level, threshold)\n\n print(class_ids)\n print(LABELS)\n # print(labels)\n\n labels = [LABELS[i] for i in class_ids]\n\n if save_image:\n yolo_save_img(image, class_ids, boxes, labels, confidences, colors, 'python_predictions.jpg')\n\n return class_ids, labels, boxes, confidences", "def get_yolo_net(cfg_path, weight_path):\n\n if not cfg_path or not weight_path:\n raise Exception('missing inputs. 
See file.')\n\n print('[INFO] loading YOLO from disk...')\n net = cv2.dnn.readNetFromDarknet(cfg_path, weight_path)\n\n return net", "def VLocNet_full(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1')(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2')(odo_fc_1)\n odo_fc_3 = Dense(4, name='odo_fc_3')(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_geo')\n\n pose_5 = ResNet_50_unit_5(input_tensor=pose_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1')(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2')(pose_fc_1)\n pose_fc_3 = Dense(4, name='pose_fc_3')(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1], output=[odo_merge, pose_merge], 
name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def bc_train_nvidia():\n\timg_rows,img_cols = 64,64\n\tinput_shape = (img_rows,img_cols,3)\n\n\t# the model\t\n\tmodel = bc_nvidia_model(input_shape = input_shape)\n\n\t\n\timg_dim = (img_rows,img_cols)\n\n\t# reading the drivelog\t\n\tcsv_data = pd.read_csv(data_path+csv_path,usecols=[\"center\",\"left\",\"right\",\"steering\"])\n\n\tthreshold = 1\n\tbatch_size = 240\n\tepochs = 6\n\tyvals = []\n\n\tfor i in range(epochs):\n\t\tgen = generate_data_train(data_path,csv_data,img_dim,batch_size,threshold,yvals)\n\t\t\n\t\tmodel.fit_generator(gen, samples_per_epoch = 24000, nb_epoch = 1, verbose = 1)\n\n\t\t# thresholding against values close to 0 to balance the data\n\t\tthreshold = 1/(i+1)\n\t\n\t# serialize model to JSON\n\tmodel_json = model.to_json()\n\twith open(\"model.json\", \"w\") as json_file:\n\t json_file.write(model_json)\n\t# serialize weights to HDF5\n\tmodel.save_weights(\"model.h5\")\n\twith open(\"s_angles\",\"wb\") as y_file:\n\t\tpickle.dump(yvals,y_file)\n\treturn", "def build_predictor(self):\n if self.library == \"yolov5\":\n self.predictor = torch.hub.load('ultralytics/yolov5', self.model_name)\n self.predictor.iou = self.nms_thresh # NMS IoU threshold (0-1)\n if self.library == \"detectron2\":\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(self.model_name))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n cfg.MODEL.NMS_THRESH = self.nms_thresh # NMS IoU threshold\n if self.device == \"cpu\": cfg.MODEL.DEVICE = self.device\n # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... 
url as well\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(self.model_name)\n self.predictor = DefaultPredictor(cfg)", "def __init__(self, args):\n \n super(MicroNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 1, kernel_size=1)\n self.conv2 = nn.Conv2d(1, 29, kernel_size=5)\n self.maxpool2 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv3 = nn.Conv2d(29, 59, kernel_size=3)\n self.maxpool3 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv4 = nn.Conv2d(59, 74, kernel_size=3)\n self.maxpool4 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv2_drop = nn.Dropout2d()\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(1184, 300)\n self.fc2 = nn.Linear(300, args.num_classes)\n self.conv0_bn = nn.BatchNorm2d(3)\n self.conv1_bn = nn.BatchNorm2d(1)\n self.conv2_bn = nn.BatchNorm2d(29)\n self.conv3_bn = nn.BatchNorm2d(59)\n self.conv4_bn = nn.BatchNorm2d(74)\n self.dense1_bn = nn.BatchNorm1d(300)", "def model(x_crop, y_, reuse):\n with tf.variable_scope(\"model\", reuse=reuse):\n net = tl.layers.InputLayer(x_crop, name='input')\n output1 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')\n net = tl.layers.MaxPool2d(output1, (3, 3), (2, 2), padding='SAME', name='pool1')\n output2 = tl.layers.Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')\n net = tl.layers.MaxPool2d(output2, (3, 3), (2, 2), padding='SAME', name='pool2')\n net = tl.layers.FlattenLayer(net, name='flatten')\n output3 = tl.layers.DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')\n output4 = tl.layers.DenseLayer(output3, 192, act=tf.nn.relu, name='d2relu')\n output5 = tl.layers.DenseLayer(output4, 10, act=None, name='output')\n\n return output1.outputs, output2.outputs, output3.outputs, output4.outputs, output5.outputs, output5", "def mgcNetArch(self, **kwargs):\n\n def_vals = {\"input_img_rows\" : self.input_img_rows,\n \"input_img_cols\" : self.input_img_cols,\n \"channels\" : self.channels,\n \"nb_classes\" : self.nb_classes,\n \"outLayer\" : 'gloAvg', \n \"l2_val\" : 0.00, \n \"net_architr\" : 'cnn_max', \n \"block_typex\" : 'basic', \n \"block_repeatx\" : [1, 1]\n }\n\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n _input_img_rows = kwargs['input_img_rows']\n _input_img_cols = kwargs['input_img_cols']\n _channels = kwargs['channels']\n _nb_classes = kwargs['nb_classes']\n _outLayer = kwargs['outLayer']\n _l2_val = kwargs['l2_val']\n _net_architr = kwargs['net_architr']\n _block_typex = kwargs['block_typex']\n _block_repeatx = kwargs['block_repeatx']\n \n \n params = {\"input_img_rows\" : _input_img_rows,\n \"input_img_cols\" : _input_img_cols,\n \"channels\" : _channels,\n \"nb_classes\" : _nb_classes\n }\n \n print(_net_architr)\n if _net_architr == 'cnn_max':\n model = mgcNetArchMax(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'cnn_stride':\n model = mgcNetArchStride2(outLayer = _outLayer, l2_val = _l2_val, **params)\n\n elif _net_architr == 'cnn_stride_mini':\n model = mgcNetArchStride2Mini(outLayer = _outLayer, l2_val = _l2_val, **params)\n\n elif _net_architr == 'common_cnn':\n model = mgcNetArchCommonCnn(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'net_in_net':\n model = mgcNetArchNin(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n elif _net_architr == 'resnet':\n model = mgcResnet(block_type = _block_typex, block_repeat = _block_repeatx, **params)\n \n elif _net_architr == 'resblock':\n model = mgcNetArchRes(outLayer = _outLayer, l2_val = 
_l2_val, **params)\n\n elif _net_architr == 'skipconnect':\n model = mgcNetArchSkip(outLayer = _outLayer, l2_val = _l2_val, **params)\n elif _net_architr == 'skipconnect_mini':\n model = mgcNetArchSkipMini(outLayer = _outLayer, l2_val = _l2_val, **params)\n \n self.model = model\n self.plot_model = SVG(model_to_dot(model, show_shapes = True).create(prog='dot', format='svg'))\n #self.model_summary = model.summary() \n \n return self", "def train_mobilenetv2():\n\n # load data\n training_sets = load_augmented_dataset()\n\n # build models\n model_mobile = build_mobilenetv2()\n\n # store base weights\n baseWeights_t = model_mobile.get_weights()\n\n # NOTE: You can still leave this alone if you've only downloaded the fully augmented set.\n for training_set in training_sets:\n print(\" Starting training for set {}\".format(str(training_set)))\n model_mobile.set_weights(baseWeights_t) # Resets model\n train_x = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][0]))\n train_y = np.load(os.path.join(\"./model_cache/train_data\", training_sets[training_set][1]))\n\n early_stopping_monitor = EarlyStopping(patience=2)\n history = model_mobile.fit(train_x, train_y, batch_size=32, epochs=20, verbose=1, validation_split=0.2,\n shuffle=True,\n callbacks=[early_stopping_monitor])\n\n mpu.plot_accuracy_loss(history,\n \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2_plots.png\".format(str(training_set)),\n \"model_charts/{}_mobilenetv2_plots.png\".format(str(training_set)))\n\n model_mobile.save(\"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)))\n\n upload_blob(BUCKET_NAME, \"./model_cache/train_data/{}_mobilenetv2.h5\".format(str(training_set)),\n \"saved_models/{}_mobilenetv2.h5\".format(str(training_set)))", "def yolo_body(inputs, num_anchors, num_classes):\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))\n \n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[148].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n \n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[89].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n \n return Model(inputs, [y1,y2,y3])", "def __init__(self,\n mobilenet_v2_net,\n channel_means=(0., 0., 0.),\n channel_stds=(1., 1., 1.),\n bgr_ordering=False):\n\n super(CenterNetMobileNetV2FeatureExtractor, self).__init__(\n channel_means=channel_means,\n channel_stds=channel_stds,\n bgr_ordering=bgr_ordering)\n self._network = mobilenet_v2_net\n\n output = self._network(self._network.input)\n\n # MobileNet by itself transforms a 224x224x3 volume into a 7x7x1280, which\n # leads to a stride of 32. We perform upsampling to get it to a target\n # stride of 4.\n for num_filters in [256, 128, 64]:\n # 1. We use a simple convolution instead of a deformable convolution\n conv = tf.keras.layers.Conv2D(\n filters=num_filters, kernel_size=1, strides=1, padding='same')\n output = conv(output)\n output = tf.keras.layers.BatchNormalization()(output)\n output = tf.keras.layers.ReLU()(output)\n\n # 2. 
We use the default initialization for the convolution layers\n # instead of initializing it to do bilinear upsampling.\n conv_transpose = tf.keras.layers.Conv2DTranspose(\n filters=num_filters, kernel_size=3, strides=2, padding='same')\n output = conv_transpose(output)\n output = tf.keras.layers.BatchNormalization()(output)\n output = tf.keras.layers.ReLU()(output)\n\n self._network = tf.keras.models.Model(\n inputs=self._network.input, outputs=output)", "def __init__(self, data_cfg, pipeline_cfg, root_path, sel_index=0):\n\n super(DetRetailOneDataset, self).__init__(\n data_cfg, pipeline_cfg, root_path, sel_index\n )\n\n self.cat2label = {cat: i for i, cat in enumerate(self.class_names)}\n self.ORI_CLASSES = (\n \"asamu\",\n \"baishikele\",\n \"baokuangli\",\n \"aoliao\",\n \"bingqilinniunai\",\n \"chapai\",\n \"fenda\",\n \"guolicheng\",\n \"haoliyou\",\n \"heweidao\",\n \"hongniu\",\n \"hongniu2\",\n \"hongshaoniurou\",\n \"kafei\",\n \"kaomo_gali\",\n \"kaomo_jiaoyan\",\n \"kaomo_shaokao\",\n \"kaomo_xiangcon\",\n \"kele\",\n \"laotansuancai\",\n \"liaomian\",\n \"lingdukele\",\n \"maidong\",\n \"mangguoxiaolao\",\n \"moliqingcha\",\n \"niunai\",\n \"qinningshui\",\n \"quchenshixiangcao\",\n \"rousongbing\",\n \"suanlafen\",\n \"tangdaren\",\n \"wangzainiunai\",\n \"weic\",\n \"weitanai\",\n \"weitaningmeng\",\n \"wulongcha\",\n \"xuebi\",\n \"xuebi2\",\n \"yingyangkuaixian\",\n \"yuanqishui\",\n \"xuebi-b\",\n \"kebike\",\n \"tangdaren3\",\n \"chacui\",\n \"heweidao2\",\n \"youyanggudong\",\n \"baishikele-2\",\n \"heweidao3\",\n \"yibao\",\n \"kele-b\",\n \"AD\",\n \"jianjiao\",\n \"yezhi\",\n \"libaojian\",\n \"nongfushanquan\",\n \"weitanaiditang\",\n \"ufo\",\n \"zihaiguo\",\n \"nfc\",\n \"yitengyuan\",\n \"xianglaniurou\",\n \"gudasao\",\n \"buding\",\n \"ufo2\",\n \"damaicha\",\n \"chapai2\",\n \"tangdaren2\",\n \"suanlaniurou\",\n \"bingtangxueli\",\n \"weitaningmeng-bottle\",\n \"liziyuan\",\n \"yousuanru\",\n \"rancha-1\",\n \"rancha-2\",\n \"wanglaoji\",\n \"weitanai2\",\n \"qingdaowangzi-1\",\n \"qingdaowangzi-2\",\n \"binghongcha\",\n \"aerbeisi\",\n \"lujikafei\",\n \"kele-b-2\",\n \"anmuxi\",\n \"xianguolao\",\n \"haitai\",\n \"youlemei\",\n \"weiweidounai\",\n \"jindian\",\n \"3jia2\",\n \"meiniye\",\n \"rusuanjunqishui\",\n \"taipingshuda\",\n \"yida\",\n \"haochidian\",\n \"wuhounaicha\",\n \"baicha\",\n \"lingdukele-b\",\n \"jianlibao\",\n \"lujiaoxiang\",\n \"3+2-2\",\n \"luxiangniurou\",\n \"dongpeng\",\n \"dongpeng-b\",\n \"xianxiayuban\",\n \"niudufen\",\n \"zaocanmofang\",\n \"wanglaoji-c\",\n \"mengniu\",\n \"mengniuzaocan\",\n \"guolicheng2\",\n \"daofandian1\",\n \"daofandian2\",\n \"daofandian3\",\n \"daofandian4\",\n \"yingyingquqi\",\n \"lefuqiu\",\n )", "def __init__(self, generator, tgt_vocab,\n normalization=\"sents\",\n label_smoothing=0.0,\n use_kl_annealing=False,\n use_kl_freebits=False,\n kl_freebits_margin=0.0,\n kl_annealing_current=0.0,\n kl_annealing_increment=0.0001,\n kl_annealing_warmup_steps=1000,\n image_loss_type='logprob',\n use_local_image_features=False,\n two_step_image_prediction=False\n ):\n self.multimodal_model_type = 'vi-model1'\n\n super(NMTVIModel1LossCompute, self).__init__(generator, tgt_vocab,\n normalization, label_smoothing)\n\n # kl annealing parameters\n self.n_model_updates = 0\n self.use_kl_annealing = use_kl_annealing\n if use_kl_annealing:\n self.kl_annealing_current = kl_annealing_current\n self.kl_annealing_increment = kl_annealing_increment\n self.kl_annealing_warmup_steps = kl_annealing_warmup_steps\n else:\n 
self.kl_annealing_current = 1.0\n self.kl_annealing_increment = 0.0\n self.kl_annealing_warmup_steps = 0\n\n self.use_kl_freebits = use_kl_freebits\n if use_kl_freebits:\n self.kl_freebits_margin = kl_freebits_margin\n else:\n self.kl_freebits_margin = 0.0\n\n self.image_loss_type = image_loss_type\n self.use_local_image_features = use_local_image_features\n self.two_step_image_prediction = two_step_image_prediction\n self._statistics = onmt.VIStatistics\n\n if image_loss_type == 'categorical':\n self.image_loss_criterion = nn.NLLLoss2d()", "def loss_func(pred, conv, label, bboxes, num_classes, train_input_size, iou_loss_threshold):\n \"\"\" giou replaces l2 norm losses of x, y, w, h as an improvement from original yolo_v3 \"\"\"\n \n # obtain number of classes\n num_classes = num_classes\n \n # obtain shape of raw yolo_v3 output (pre-decode)\n conv_shape = tf.shape(conv)\n \n # obtain batch size of raw yolo_v3 output (pre-decode)\n batch_size = conv_shape[0]\n \n # obtain output size of raw yolo_v3 output (pre-decode)\n output_size = conv_shape[1]\n \n # obtain train input size\n train_input_size = tf.cast(train_input_size, tf.float32)\n \n # reshape raw conv output \n conv = tf.reshape(conv, (batch_size, output_size, output_size, 3, 5 + num_classes))\n \n # obtain objectiveness scores and class probabilites for batch from raw conv output\n conv_raw_objectiveness = conv[:, :, :, :, 4:5]\n conv_raw_prob = conv[:, :, :, :, 5:]\n \n # obtain predicted x, y, w, h and objectiveness scores for batch based on train_input_size post decode\n pred_xywh = pred[:, :, :, :, 0:4]\n pred_conf = pred[:, :, :, :, 4:5]\n \n # obtain label x, y, w, h and objectiveness scores for batch based on train_input_size\n label_xywh = label[:, :, :, :, 0:4]\n respond_bbox = label[:, :, :, :, 4:5]\n label_prob = label[:, :, :, :, 5:]\n \n # obtain giou between predictions and labels \n giou = tf.expand_dims(bbox_giou(pred_xywh, label_xywh), axis = -1)\n\n # loss factor that gives higher weight to smaller boxes \n bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (train_input_size ** 2)\n \n # obtain giou loss \n giou_loss = respond_bbox * bbox_loss_scale * (1 - giou)\n \n # obtain iou between predictions and labels \n iou = bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])\n \n # find the value of iou with the largest prediction box\n max_iou = tf.reduce_max(iou, axis = -1, keepdims = True)\n\n # if the largest iou is less than the threshold, it is considered that the prediction box contains no objects, \n # then the background box\n respond_bgd = (1.0 - respond_bbox) * tf.cast(max_iou < iou_loss_threshold, tf.float32)\n \n # focal factor on objectiveness loss \n conf_focal = tf.pow(respond_bbox - pred_conf, 2)\n\n # calculate the objectiveness loss \n # we hope that if the grid contains objects, then the network output prediction box has a confidence of 1 and 0 \n # when there is no object.\n conf_loss = conf_focal * (respond_bbox + respond_bgd) * \\\n tf.nn.sigmoid_cross_entropy_with_logits(labels = respond_bbox, logits = conv_raw_objectiveness)\n \n # class probabilities loss\n prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels = label_prob, logits = conv_raw_prob)\n \n # sum up losses and take mean accross batch\n giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis = [1,2,3,4]))\n conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis = [1,2,3,4]))\n prob_loss = 
tf.reduce_mean(tf.reduce_sum(prob_loss, axis = [1,2,3,4]))\n \n if np.isnan(giou_loss):\n \n giou_loss = tf.Variable(0, trainable = False, dtype = tf.float32)\n \n return giou_loss, conf_loss, prob_loss", "def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Shuffle, self).__init__()\r\n print(\"CIFAR VGG16_Shuffle is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n\r\n # Define the building blocks\r\n if layer == 11:\r\n self.conv11 = CONV_3x3shuffle(3, 64, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 12:\r\n self.conv12 = nn.Sequential(CONV_3x3shuffle(64, 64, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv12 = CONV_3x3(64, 64, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 21:\r\n self.conv21 = CONV_3x3shuffle(64, 128, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 22:\r\n self.conv22 = nn.Sequential(CONV_3x3shuffle(128, 128, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv22 = CONV_3x3(128, 128, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 31:\r\n self.conv31 = CONV_3x3shuffle(128, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 32:\r\n self.conv32 = CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 33:\r\n self.conv33 = nn.Sequential(CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv33 = CONV_3x3(256, 256, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 41:\r\n self.conv41 = CONV_3x3shuffle(256, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 42:\r\n self.conv42 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 43:\r\n self.conv43 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv43 = CONV_3x3(512, 512, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 51:\r\n self.conv51 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 52:\r\n self.conv52 = CONV_3x3shuffle(512, 512, 
kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 53:\r\n self.conv53 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = networks.define_G(g3_input_nc, 1, opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def add_yolof_config(cfg):\n\n # anchor_generator\n cfg.MODEL.YOLOF 
= CN()\n\n cfg.MODEL.YOLOF.ENCODER = CN()\n\n cfg.MODEL.YOLOF.ENCODER.BACKBONE_LEVEL = \"res5\"\n cfg.MODEL.YOLOF.ENCODER.IN_CHANNELS = 2048\n cfg.MODEL.YOLOF.ENCODER.NUM_CHANNELS = 512\n cfg.MODEL.YOLOF.ENCODER.BLOCK_MID_CHANNELS = 128\n cfg.MODEL.YOLOF.ENCODER.NUM_RESIDUAL_BLOCKS = 4\n cfg.MODEL.YOLOF.ENCODER.BLOCK_DILATIONS = [2, 4, 6, 8]\n cfg.MODEL.YOLOF.ENCODER.NORM = \"BN\"\n cfg.MODEL.YOLOF.ENCODER.ACTIVATION = \"ReLU\"\n\n cfg.MODEL.YOLOF.DECODER = CN()\n\n cfg.MODEL.YOLOF.DECODER.IN_CHANNELS = 512\n cfg.MODEL.YOLOF.DECODER.NUM_CLASSES = 1\n cfg.MODEL.YOLOF.DECODER.NUM_ANCHORS = 5\n cfg.MODEL.YOLOF.DECODER.CLS_NUM_CONVS = 2\n cfg.MODEL.YOLOF.DECODER.REG_NUM_CONVS = 4\n cfg.MODEL.YOLOF.DECODER.NORM = \"BN\"\n cfg.MODEL.YOLOF.DECODER.ACTIVATION = \"ReLU\"\n cfg.MODEL.YOLOF.DECODER.PRIOR_PROB = 0.01\n\n # YOLOF box2box transform\n cfg.MODEL.YOLOF.BOX_TRANSFORM = CN()\n cfg.MODEL.YOLOF.BOX_TRANSFORM.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\n cfg.MODEL.YOLOF.BOX_TRANSFORM.ADD_CTR_CLAMP = True\n cfg.MODEL.YOLOF.BOX_TRANSFORM.CTR_CLAMP = 32\n\n cfg.MODEL.YOLOF.MATCHER = CN()\n\n cfg.MODEL.YOLOF.MATCHER.TOPK = 4\n # YOLOF ignore thresholds\n cfg.MODEL.YOLOF.POS_IGNORE_THRESHOLD = 0.15\n cfg.MODEL.YOLOF.NEG_IGNORE_THRESHOLD = 0.7\n\n # YOLOF losses\n cfg.MODEL.YOLOF.LOSSES = CN()\n cfg.MODEL.YOLOF.LOSSES.FOCAL_LOSS_GAMMA = 2.0\n cfg.MODEL.YOLOF.LOSSES.FOCAL_LOSS_ALPHA = 0.25\n cfg.MODEL.YOLOF.LOSSES.BBOX_REG_LOSS_TYPE = \"giou\"\n\n cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]\n cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[1.0]]\n cfg.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0\n\n # YOLOF test\n cfg.MODEL.YOLOF.SCORE_THRESH_TEST = 0.05\n cfg.MODEL.YOLOF.TOPK_CANDIDATES_TEST = 1000\n cfg.MODEL.YOLOF.NMS_THRESH_TEST = 0.6\n cfg.MODEL.YOLOF.DETECTIONS_PER_IMAGE = 100\n\n # Optimizer.\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 1.0\n\n # transform.\n cfg.SOLVER.TRAIN_PIPELINES = [\n (\"CenterAffine\", dict(boarder=128, output_size=(512, 512), random_aug=True)),\n (\"RandomBrightness\", dict(intensity_min=0.6, intensity_max=1.4)),\n (\"RandomContrast\", dict(intensity_min=0.6, intensity_max=1.4)),\n (\"RandomSaturation\", dict(intensity_min=0.6, intensity_max=1.4)),\n (\"RandomLighting\", dict(scale=0.1)),\n ]", "def test_two_layer_classifier(caplog):\n caplog.set_level(logging.WARNING, logger=Logger.name)\n\n # Input X specification\n D = 2 # Dimension of X WITHOUT bias\n\n # Layer 01. Output [email protected] of shape (N,M1)\n M1 = 4 # Nodes in the matmul 01\n W1 = weights.he(M1, D+1) # Weights in the matmul 01 WITH bias (D+1)\n\n # Layer 02. 
Input A01 of shape (N,M1).\n # Output [email protected] of shape (N,M2)\n M2: int = 3 # Number of categories to classify\n W2 = weights.he(M2, M1+1) # Weights in the matmul 02 WITH bias (M1+1)\n\n optimizer = SGD(lr=TYPE_FLOAT(0.2))\n\n # X data\n # X, T, V = linear_separable_sectors(n=N, d=D, m=M)\n X, T = venn_of_circle_a_not_b(\n radius=TYPE_FLOAT(1.0),\n ratio=TYPE_FLOAT(1.3),\n m=M2,\n n=10\n )\n N = X.shape[0]\n assert X.shape[0] > 0 and X.shape == (N, D)\n X, T = transform_X_T(X, T)\n\n def callback(W1, W2):\n \"\"\"Dummy callback\"\"\"\n pass\n\n profiler = cProfile.Profile()\n profiler.enable()\n\n train_two_layer_classifier(\n N=N,\n D=D,\n X=X,\n T=T,\n M1=M1,\n W1=W1,\n M2=M2,\n W2=W2,\n log_loss_function=softmax_cross_entropy_log_loss,\n optimizer=optimizer,\n num_epochs=10,\n test_numerical_gradient=True,\n log_level=logging.DEBUG,\n callback=callback\n )\n\n profiler.disable()\n profiler.print_stats(sort=\"cumtime\")", "def train_mobilenet_v1_fcn8(load_model=\"latest\", shift_hue_prob=0,\n add_class_weight=False,batch_size=20,\n set_learning_rate=1e-3,data_aug_faster_mode=False):\n num_classes = 3\n\n image_data = ImageNpy(join(\"data\",\"train_data_baseline.npy\"), join(\"data\",\"train_label_baseline.npy\"))\n get_batches_fn = image_data.get_bathes_fn_with_crop\n\n # Load pretrained mobilenet_v1\n pretrained_model_path = join(\"pretrained_models\",\"mobilenet_v1_1.0_224_ckpt\",\"mobilenet_v1_1.0_224.ckpt\")\n\n input_image = tf.placeholder(tf.uint8, shape=(None, None, None, 3))\n correct_label = tf.placeholder(tf.uint8, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n stacked_image_label = tf.concat((input_image, correct_label), axis=3)\n\n cropped_stacked_image_label = tf.map_fn(\n lambda img: preprocess_image_label(img, cropped_shape=None),\n stacked_image_label, dtype=tf.uint8)\n\n cropped_input_image = cropped_stacked_image_label[:, :, :, 0:3]\n cropped_label = cropped_stacked_image_label[:, :, :, 3:3 + num_classes]\n\n #tf.summary.image('cropped_label', tf.expand_dims(cropped_label[:, :, :, 1], axis=3))\n\n final_layer, endpoints = mobilenetv1_fcn8_model(cropped_input_image, num_classes=3, is_training=True,\n raw_image_shape=(520 - UPPER_CUT, 800),\n decoder=\"fcn8\",data_aug_faster_mode=data_aug_faster_mode)\n\n global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\n\n train_op, cross_entropy_loss = optimize(final_layer, cropped_label,\n learning_rate, global_step,add_class_weight=add_class_weight)\n\n tf.summary.scalar('cross_entropy_loss', cross_entropy_loss)\n\n merged = tf.summary.merge_all()\n\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n if load_model == 'mobilenetv1':\n get_var = slim.get_model_variables('MobilenetV1')\n sess_load = slim.assign_from_checkpoint_fn(pretrained_model_path, get_var)\n sess_load(sess)\n sess.run(tf.global_variables_initializer())\n elif load_model=='vgg16':\n get_var = slim.get_model_variables('vgg_16')\n vgg_pretrained_path=join(\"pretrained_models\",\"vgg16/vgg_16.ckpt\")\n sess_load = slim.assign_from_checkpoint_fn(vgg_pretrained_path, get_var)\n sess_load(sess)\n sess.run(tf.global_variables_initializer())\n\n elif load_model == \"latest\":\n # saver.restore(sess,\"./model_ckpt_udacity_trained/model\")\n get_var = slim.get_variables()\n sess_load = slim.assign_from_checkpoint_fn(join(\"model_ckpt\",\"model\", get_var))\n sess_load(sess)\n # sess.run(tf.global_variables_initializer())\n else:\n raise 
ValueError(\"model wrong!\")\n\n # print(slim.get_model_variables())\n # print(len(slim.get_model_variables()))\n\n train_writer = tf.summary.FileWriter(join('log' , 'train'), sess.graph)\n\n epochs = 15\n for ep in range(epochs):\n print(\"epoch: {}\".format(ep))\n for image, label in get_batches_fn(batch_size, crop_size=None, shift_hue_prob=shift_hue_prob,\n filter=False):\n summary, _, loss, step_count = sess.run([merged, train_op, cross_entropy_loss, global_step],\n feed_dict={input_image: image, correct_label: label,\n learning_rate: set_learning_rate})\n print(\"loss: = {:.5f}\".format(loss))\n train_writer.add_summary(summary, global_step=step_count)\n saver.save(sess, join('model_ckpt','model'))", "def detect_image(yolo_v3_model, image_paths, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # create list to store images\n original_images = []\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # obtain original image\n original_image = cv2.imread(image_paths[x])\n \n # append original image to original_images list\n original_images.append(original_image[:])\n \n # convert original image to grayscale \n image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n\n # obtain concat frame if none exist\n if x == 0: \n\n concat_image = image[:]\n\n # concatenate subsequent frames to concat_image\n else:\n\n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n \n # list to store bboxes from respective scales\n pred_bbox = []\n \n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_result_reshaped = tf.reshape(pred_result, (-1, tf.shape(pred_result)[-1]))\n \n # append to pred_bbox\n pred_bbox.append(pred_result_reshaped)\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n \n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, original_images[-1], train_input_size, score_threshold)\n \n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n \n # draw bbox on latest image in orignal_images\n image = draw_bbox(original_images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image if path to save is given\n if output_path != '': cv2.imwrite(output_path, image)\n \n # display image if show is true \n if show:\n \n # show the image\n cv2.imshow(\"predicted image\", image)\n \n # load and hold the image\n cv2.waitKey(0)\n \n # to close the window after the required kill value was provided\n cv2.destroyAllWindows()\n \n return image", "def train_model(model, y_categorical, 
max_len, get_cross_validation=False, non_zero=False):\n\n\t# for label_numer = 'OPR', labels are [0,1,2,3,4,5]\n\tn_classes = 6\n\tx = get_data.get_feature_tensor(feature_dir,feature_name,max_len)\n\ty = get_data.get_labels(label_dir, label_name)\n\tif non_zero == True:\n\t\tx, y = get_data.non_zero_data(x,y,max_len, y)\n\tif y_categorical == True:\n\t\ty = np_utils.to_categorical(y)\n\ty = np.array(y)\n\tprint 'x', x.shape, 'y', y.shape\n\n\t# choose model\n\tif model == TK_TCN_regression:\n\t\tmodel = TK_TCN_regression(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\tmodel.compile(loss='mean_absolute_error', optimizer='sgd',metrics=['accuracy'])\n\telse:\n\t\tif model == TK_TCN_resnet:\n\t\t\tmodel = TK_TCN_resnet(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\telif model == TCN_V1:\n\t\t\tmodel = TCN_V1(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\telif model == TCN_V2:\n\t\t\tmodel = TCN_V2(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\telif model == TCN_V3:\n\t\t\tmodel = TCN_V3(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\telif model == TCN_V4:\n\t\t\tmodel = TCN_V4(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\telif model == TCN_V5:\n\t\t\tmodel = TCN_V5(n_classes=n_classes, feat_dim=512, max_len=max_len)\n\t\t# compile model\n\t\toptimizer = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n\t\tmodel.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['categorical_accuracy'])\n\t\t# model.compile(loss='mean_absolute_error', optimizer=optimizer,metrics=['categorical_accuracy'])\n\n\t\n\t# visualize\n\t# model.summary()\n\n\tif get_cross_validation==True:\n\t\tloss = np.zeros((4))\n\t\tacc = np.zeros((4))\n\t\tclasses = np.zeros((200, n_classes))\n\t\tx_train_cro, y_train_cro, x_test_cro, y_test_cro = set_cross_validation(x, y)\n\t\tfor i in range(3):\n\t\t\tmodel.fit(x_train_cro[i], y_train_cro[i], validation_data=[x_test_cro[i],y_test_cro[i]], epochs=5)\n\t\t\tloss_and_metrics = model.evaluate(x_test_cro[i], y_test_cro[i])\t\n\t\t\tloss[i] = loss_and_metrics[0]\n\t\t\tacc[i] = loss_and_metrics[1]\n\t\t\tclasses[i*50:(i+1)*50] = model.predict(x_test_cro[i])\n\t\tloss_mean = np.mean(loss)\n\t\tacc_mean = np.mean(acc)\n\t\ty_test = y\n\telif get_cross_validation==False:\n\t\tx_train, x_test, y_train, y_test = cross_validation.train_test_split(x,y,test_size=0.2, random_state=1)\n\t\tmodel.fit(x_train, y_train, validation_data=[x_test,y_test], epochs=5)\n\t\tloss_mean, acc_mean = model.evaluate(x_test,y_test)\n\t\tclasses = model.predict(x_test)\n\t\t\n\treturn loss_mean, acc_mean, classes, y_test", "def auto_context_one_3unet(vol_size, enc_nf, dec_nf, full_size=True, indexing='ij'):\n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n\n # get the core model\n unet_model1 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)\n [src, tgt] = unet_model1.inputs\n x1 = unet_model1.output\n\n # transform the results into a flow field.\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow1 = Conv(ndims, kernel_size=3, padding='same', name='flow1',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x1)\n # warp the source with the flow\n y1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow1])\n\n nf_dec = [32, 32, 32, 32, 8, 8]\n x2 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)([y1, tgt])\n# [y1, tgt2] = unet_model2.inputs\n# x2 = unet_model2.output\n # transform the results into a flow field.\n flow2 = Conv(ndims, kernel_size=3, padding='same', name='flow2',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x2)\n # warp the source with the flow\n y2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([y1, flow2])\n x3 = unet_core(vol_size, enc_nf, nf_dec, full_size=full_size)([y2, tgt])\n# x3 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)([y2, tgt])\n# [y1, tgt2] = unet_model2.inputs\n# x2 = unet_model2.output\n # transform the results into a flow field.\n flow3 = Conv(ndims, kernel_size=3, padding='same', name='flow3',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x3)\n # warp the source with the flow\n y3 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([y2, flow3])\n\n# flow = nrn_utils.compose(flow1, flow2, indexing='ij')\n flow12 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([flow1, flow2])\n flow12 = add([flow2, flow12])\n flow = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([flow12, flow3])\n flow = add([flow3, flow])\n# x3 = concatenate([x1, x2])\n# x3 = concatenate([flow1,x1,x2])\n # transform the results into a flow field.\n# flow = Conv(ndims, kernel_size=3, padding='same', name='flow',\n# kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x3)\n # warp the source with the flow\n y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])\n\t\n # prepare model\n model = Model(inputs=[src, tgt], outputs=[y1,y2,y3,y,flow])\n return model", "def darknet_body():\n return compose(DarknetConv2D_BN_Leaky(32, (3, 3)), MaxPooling2D(), DarknetConv2D_BN_Leaky(64, (3, 3)),\n MaxPooling2D(), bottleneck_block(128, 64), MaxPooling2D(), bottleneck_block(256, 128),\n MaxPooling2D(), bottleneck_x2_block(512, 256), MaxPooling2D(), bottleneck_x2_block(1024, 512))", "def main():\n\n # If there checkpoint is already, assign checkpoint=checkpoint_file\n checkpoint=None\n\n # Set epochs, load the data and the trainable model\n start_epoch=0\n end_epoch=7000\n learning_rate=1e-3\n batch_size=6\n\n model = DarkNet()\n data=DataLoader(416,\"data/train\")\n dataloader=torch.utils.data.DataLoader(dataset=data,batch_size=batch_size,num_workers=0,shuffle=True)\n model=model.to(\"cuda\")\n optimizer=torch.optim.Adam(model.parameters(),lr=learning_rate)\n\n # If there's a checkpoint, load its values\n if checkpoint!=None:\n model.load_state_dict(torch.load(checkpoint)['state_dict'])\n optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])\n start_epoch=torch.load(checkpoint)['epoch']\n\n for param in model.parameters():\n param.requires_grad = True\n count=0\n x_y=[]\n w_h=[]\n conf_loss=[]\n final_loss=[]\n\n # Train the model\n print(\"Starting Training..\")\n\n for epoch in 
range(start_epoch,end_epoch):\n print(\"------------------------------------------------------------------------------------------------------------\")\n for batch_id,(imgs,target) in enumerate(dataloader):\n imgs=imgs.cuda()\n target=target.cuda()\n optimizer.zero_grad()\n loss=model(imgs,target)\n loss.backward()\n optimizer.step()\n if batch_id%10==0:\n print(\"Epoch %d/%d || Batch %d || Overall Loss %.2f || X-Loss %.2f || Y-Loss %.2f || W-Loss %.2f || H-Loss %.2f\" %(epoch, \n end_epoch, batch_id, loss.item(), model.losses[0], model.losses[1], model.losses[2], model.losses[3]))\n x_y.append(model.losses[0]+model.losses[1])\n w_h.append(model.losses[2]+model.losses[3])\n conf_loss.append(model.losses[4])\n final_loss.append(loss.item())\n\n # Plot the graph to check if the loss is decreasing through the epochs\n \n # X-Y Loss\n plt.plot(x_y,label='X and Y')\n plt.savefig('x-y-loss.png')\n plt.close()\n\n # W-H Loss\n plt.plot(w_h,label='W and H')\n plt.savefig('w-h-loss.png')\n plt.close()\n\n # Confidence Loss\n plt.plot(conf_loss,label='Conf')\n plt.savefig('conf-loss.png')\n plt.close()\n\n # Overall Loss\n plt.plot(final_loss,label='Loss')\n plt.savefig('final-loss.png')\n plt.show()\n plt.close()\n\n # Save the model as checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()},\n 'checkpoints/checkpoint.epoch.{}.pth.tar'.format(epoch))", "def get_unet():\n inputs = Input((img_rows, img_cols, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),\n padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),\n padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),\n padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),\n padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n 
model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss,\n metrics=[dice_coef])\n\n return model", "def trainNet():", "def get_classification(self, image):\n # return TrafficLight.RED\n # TODO implement light color prediction\n # creating an image object \n img_np = np.array(image) \n\n # convert np array to tensor\n input_tensor = tf.convert_to_tensor(img_np)\n\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis, ...]\n\n\n detections = self.loaded(input_tensor)\n\n num_detections = int(detections.pop('num_detections'))\n\n # detection_classes should be ints.\n detections_dict = {key: value[0, :num_detections].numpy() for key, value in detections.items()}\n\n\n # detection_classes should be ints.\n detections_dict['detection_classes'] = detections_dict['detection_classes'].astype(np.int64)\n\n label_id_offset = 1\n\n # DEBUG - can do it in a cleaner way :0\n tl_classes = {3: 'green', 2: 'red'}\n top_classes_prediction = list(detections_dict['detection_classes']+label_id_offset)[:5] \n #print(top_classes_prediction)\n for i in range(len(top_classes_prediction)):\n if top_classes_prediction[i] == 2:\n top_classes_prediction[i] = 'green'\n elif top_classes_prediction[i] == 3:\n top_classes_prediction[i] = 'red'\n\n\n #print(\"--------->\", image_path, \"<-----------\")\n #print( top_classes_prediction ) \n #print(detections_dict['detection_scores'][:5], '\\n' )\n\n # basic red tl logic\n if top_classes_prediction[0] == 'red' and detections_dict['detection_scores'][0] >= 0.60:\n #print(\"-------------> RED TRAFFIC LIGHT <----------------\\n\")\n self.current_light = TrafficLight.RED\n #rospy.logwarn( \"----------------- Taffic light is RED !!! -------------------- \" )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n else:\n #print(\"No red traffic is detected\\n\")\n self.current_light = TrafficLight.GREEN\n #rospy.logwarn( \"----------------- You're good to go !!! 
--------: {0} - {1} \".format(top_classes_prediction[0], detections_dict['detection_scores'][0]) )\n self.display_predictions_scores( top_classes_prediction, detections_dict['detection_scores'] )\n\n return self.current_light", "def VLocNet_Odometry_new(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n glo_ave = GlobalAveragePooling2D()(odo_5)\n\n fc_1 = Dense(1024, name='fc_1')(glo_ave)\n\n fc_2 = Dense(3, name='fc_2')(fc_1)\n fc_3 = Dense(4, name='fc_3')(fc_1)\n\n # Create model.\n model = Model(input=[input_odo_0, input_odo_1], output=[fc_2, fc_3], name='VLocNet')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n 
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model", "def build_engine(model_name, do_int8, dla_core, verbose=False):\n cfg_file_path = model_name + '.cfg'\n parser = DarkNetParser()\n layer_configs = parser.parse_cfg_file(cfg_file_path)\n net_c = get_c(layer_configs)\n net_h, net_w = get_h_and_w(layer_configs)\n\n print('Loading the ONNX file...')\n onnx_data = load_onnx(model_name)\n if onnx_data is None:\n return None\n\n TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger()\n EXPLICIT_BATCH = [] if trt.__version__[0] < '7' else \\\n [1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)]\n with trt.Builder(TRT_LOGGER) as builder, builder.create_network(*EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n if do_int8 and not builder.platform_has_fast_int8:\n raise RuntimeError('INT8 not supported on this platform')\n if not parser.parse(onnx_data):\n print('ERROR: Failed to parse the ONNX file.')\n for error in range(parser.num_errors):\n print(parser.get_error(error))\n return None\n network = set_net_batch(network, MAX_BATCH_SIZE)\n\n print('Adding yolo_layer plugins.')\n network = add_yolo_plugins(network, model_name, TRT_LOGGER)\n\n print('Adding a concatenated output as \"detections\".')\n network = add_concat(network, model_name, TRT_LOGGER)\n\n print('Naming the input tensort as \"input\".')\n network.get_input(0).name = 'input'\n\n print('Building the TensorRT engine. This would take a while...')\n print('(Use \"--verbose\" or \"-v\" to enable verbose logging.)')\n if trt.__version__[0] < '7': # older API: build_cuda_engine()\n if dla_core >= 0:\n raise RuntimeError('DLA core not supported by old API')\n builder.max_batch_size = MAX_BATCH_SIZE\n builder.max_workspace_size = 1 << 30\n builder.fp16_mode = True # alternative: builder.platform_has_fast_fp16\n if do_int8:\n from calibrator import YOLOEntropyCalibrator\n builder.int8_mode = True\n builder.int8_calibrator = YOLOEntropyCalibrator(\n 'calib_images', (net_h, net_w), 'calib_%s.bin' % model_name)\n engine = builder.build_cuda_engine(network)\n else: # new API: build_engine() with builder config\n builder.max_batch_size = MAX_BATCH_SIZE\n config = builder.create_builder_config()\n config.max_workspace_size = 1 << 30\n config.set_flag(trt.BuilderFlag.GPU_FALLBACK)\n config.set_flag(trt.BuilderFlag.FP16)\n profile = builder.create_optimization_profile()\n profile.set_shape(\n 'input', # input tensor name\n (MAX_BATCH_SIZE, net_c, net_h, net_w), # min shape\n (MAX_BATCH_SIZE, net_c, net_h, net_w), # opt shape\n (MAX_BATCH_SIZE, net_c, net_h, net_w)) # max shape\n config.add_optimization_profile(profile)\n if do_int8:\n from calibrator import YOLOEntropyCalibrator\n config.set_flag(trt.BuilderFlag.INT8)\n config.int8_calibrator = YOLOEntropyCalibrator(\n 'calib_images', (net_h, net_w),\n 'calib_%s.bin' % model_name)\n config.set_calibration_profile(profile)\n if dla_core >= 0:\n config.default_device_type = trt.DeviceType.DLA\n config.DLA_core = dla_core\n config.set_flag(trt.BuilderFlag.STRICT_TYPES)\n print('Using DLA core %d.' 
% dla_core)\n engine = builder.build_engine(network, config)\n\n if engine is not None:\n print('Completed creating engine.')\n return engine", "def test_confidence_thresholding_2thresholds_2d_vis_api(csv_filename):\n input_features = [\n text_feature(encoder={\"vocab_size\": 10, \"min_len\": 1, \"type\": \"stacked_cnn\"}),\n number_feature(),\n category_feature(encoder={\"vocab_size\": 10, \"embedding_size\": 5}),\n set_feature(),\n sequence_feature(encoder={\"vocab_size\": 10, \"max_len\": 10, \"type\": \"embed\"}),\n ]\n output_features = [\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n ]\n encoder = \"parallel_cnn\"\n with TemporaryDirectory() as tmpvizdir:\n # Generate test data\n data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))\n input_features[0][ENCODER][TYPE] = encoder\n model = run_api_experiment(input_features, output_features)\n test_df, train_df, val_df = obtain_df_splits(data_csv)\n _, _, output_dir = model.train(\n training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpvizdir, \"results\")\n )\n test_stats, predictions, _ = model.evaluate(dataset=test_df, collect_predictions=True, output_dir=output_dir)\n\n output_feature_name1 = output_features[0][\"name\"]\n output_feature_name2 = output_features[1][\"name\"]\n\n ground_truth_metadata = model.training_set_metadata\n feature1_cols = [\n f\"{output_feature_name1}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name1][\"idx2str\"]\n ]\n feature2_cols = [\n f\"{output_feature_name2}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name2][\"idx2str\"]\n ]\n\n # probabilities need to be list of lists containing each row data from the\n # probability columns ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate\n probability1 = predictions.loc[:, feature1_cols].values\n probability2 = predictions.loc[:, feature2_cols].values\n\n target_predictions1 = test_df[output_feature_name1]\n target_predictions2 = test_df[output_feature_name2]\n ground_truth1 = np.asarray(\n [ground_truth_metadata[output_feature_name1][\"str2idx\"][prediction] for prediction in target_predictions1]\n )\n ground_truth2 = np.asarray(\n [ground_truth_metadata[output_feature_name2][\"str2idx\"][prediction] for prediction in target_predictions2]\n )\n viz_outputs = (\"pdf\", \"png\")\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(output_dir, \"*.{}\").format(viz_output)\n visualize.confidence_thresholding_2thresholds_2d(\n [probability1, probability2],\n [ground_truth1, ground_truth2],\n model.training_set_metadata,\n [output_feature_name1, output_feature_name2],\n labels_limit=0,\n model_names=[\"Model1\"],\n output_directory=output_dir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 3 == len(figure_cnt)", "def compute_loss(darknet_output, ground_truth):\n darknet_output = tf.reshape(darknet_output, [-1, 13, 13, 5, 85])\n net_raw_xy = darknet_output[..., :2]\n net_raw_hw = darknet_output[..., 2:4]\n net_raw_conf = darknet_output[..., 4:5]\n net_raw_prob = darknet_output[..., 5:]\n\n\n prediction = output_to_prediction(darknet_output)\n prediction = tf.reshape(prediction, [-1, 13, 13, 5, 85])\n\n # the factor used to calculate the object weight\n obj_scale = 5\n\n # the factor used to calculate the no object weight\n no_obj_scale = 1\n\n # the 
factor used to calculate the class prediction loss\n class_scale = 1\n\n # the factor factor used to calculate the coordinate loss\n coordinates_scale = 1\n\n # decode the prediction, convert all values to the 13x13 feature map\n pred_xy_offset = prediction[..., :2]\n pred_hw_ratio = prediction[..., 2:4]\n pred_conf = prediction[..., 4:5]\n pred_class = prediction[..., 5:]\n\n # decode the ground truth, convert all values to the 13x13 feature map\n gt_xy_offset = ground_truth[..., :2]\n gt_hw_ratio = ground_truth[..., 2:4]\n gt_conf = ground_truth[..., 4:5]\n gt_class = ground_truth[..., 5:]\n\n # 13 x 13 x 2 tensor, used to compute the x and y in the 13 x 13 feature map\n biases = tf.Variable([[[j * 1.0, i * 1.0] for i in range(13)] for j in range(13)])\n biases = tf.reshape(biases, [1, 13, 13, 1, 2])\n\n box_priors = tf.Variable([[0.57273, 0.677385], [1.87446, 2.06253], [3.33843, 5.47434], [7.88282, 3.52778], [9.77052, 9.16828]])\n box_priors = tf.reshape(box_priors, [1, 1, 1, 5, 2])\n\n pred_xy = pred_xy_offset + biases\n pred_hw = pred_hw_ratio * box_priors\n\n gt_xy = gt_xy_offset + biases\n gt_hw = gt_hw_ratio * box_priors\n\n # calculate the top-left and bottom-right point of the predicted box\n pred_xy_min, pred_xy_max = pred_xy - pred_hw / 2.0, pred_xy + pred_hw / 2.0\n\n gt_xy_min, gt_xy_max = gt_xy - gt_hw / 2.0, gt_xy + gt_hw / 2.0\n\n intersection_min = tf.maximum(gt_xy_min, pred_xy_min)\n intersection_max = tf.minimum(gt_xy_max, pred_xy_max)\n intersection_hw = tf.maximum(intersection_max - intersection_min, 0.0)\n\n # calculate the intersection area and the union area of the prediction and the ground truth\n intersection_area = tf.multiply(intersection_hw[..., 0], intersection_hw[..., 1])\n union_area = tf.multiply(gt_hw[..., 0], gt_hw[..., 1]) + tf.multiply(pred_hw[..., 0],\n pred_hw[..., 1]) - intersection_area\n # shape of iou: (?, 13, 13, 5)\n box_iou = intersection_area / union_area\n\n obj = gt_conf\n\n gt_raw_hw = tf.log(gt_hw_ratio)\n\n gt_raw_hw = tf.where(tf.is_inf(gt_raw_hw), tf.zeros_like(gt_raw_hw), gt_raw_hw)\n\n # ======================================================================================\n\n coords_xy_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_xy, labels=gt_xy_offset) * obj * coordinates_scale\n coords_xy_loss = tf.reduce_sum(coords_xy_loss)\n\n coords_wh_loss = tf.square(net_raw_hw - gt_raw_hw) * 0.5 * obj * coordinates_scale\n\n coords_wh_loss = tf.reduce_sum(coords_wh_loss)\n\n coords_loss = coords_xy_loss + coords_wh_loss\n\n ignore_thresh = 0.5\n\n ignore_mask = tf.cast(tf.less(box_iou, ignore_thresh * tf.ones_like(box_iou)), tf.float32)\n ignore_mask = tf.reshape(ignore_mask, [-1, 13, 13, 5])\n ignore_mask = tf.expand_dims(ignore_mask, -1)\n\n back_loss = ((1 - obj) * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_conf, labels=obj) * ignore_mask)\n back_loss = tf.reduce_sum(back_loss)\n\n fore_loss = obj * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_conf, labels=obj)\n\n fore_loss = tf.reduce_sum(fore_loss)\n\n conf_loss = back_loss + fore_loss\n\n class_loss = tf.reduce_sum(obj * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_prob, labels=gt_class))\n\n loss = coords_loss + conf_loss + class_loss\n\n return loss", "def train_conv_net(datasets,datasets_weights,\n U, U_Topical,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=25, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n use_valid_set=True,\n 
show_states=False,\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True): \n rng = np.random.RandomState(3435)\n img_h = len(datasets[0][0])-1 \n U_Topical.dtype = \"float32\"\n (num_topics,topic_dim) = U_Topical.shape\n word_w = img_w\n img_w = int(img_w + num_topics*topic_dim)\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs: \n filter_shapes.append((feature_maps, 1, filter_h, filter_w)) # 100 1 3 300\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1)) # size of words samples one\n parameters = [(\"image shape\",img_h,img_w),(\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\", dropout_rate), (\"batch_size\",batch_size),(\"non_static\", non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\", conv_non_linear), (\"non_static\", non_static)\n ,(\"sqr_norm_lim\",sqr_norm_lim),(\"shuffle_batch\",shuffle_batch)]\n #print parameters \n \n #define model architecture\n index = T.lscalar()\n x = T.matrix('x') \n y = T.ivector('y')\n x_topic = T.tensor3('x_topic')\n Words = theano.shared(value = U, name = \"Words\")\n Topics = theano.shared(value=U_Topical,name=\"Topics\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(word_w, dtype='float32')\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))])\n layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0],1,x.shape[1],Words.shape[1])) \n layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n layer0_inputs_topics.append(T.dot(weights, Topic))\n layer0_input_topics = T.concatenate(layer0_inputs_topics,1)\n layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0],1,x_topic.shape[1],num_topics*topic_dim))\n layer0_input = T.concatenate([layer0_input_words,layer0_input_topics],3) \n conv_layers = []\n layer1_inputs = []\n for i in xrange(len(filter_hs)):\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n layer1_input = T.concatenate(layer1_inputs,1)\n hidden_units[0] = feature_maps*len(filter_hs) \n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n #define parameters of the model and update functions using adadelta\n params = classifier.params \n for conv_layer in conv_layers:\n params += conv_layer.params\n \n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params += [Words] #params are model parameters\n params += [Topics] #Topics embedding are adjusted\n cost = classifier.negative_log_likelihood(y) \n dropout_cost = classifier.dropout_negative_log_likelihood(y) \n grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n \n #shuffle dataset and assign to mini batches. 
if dataset size is not a multiple of mini batches, replicate \n #extra data (at random)\n np.random.seed(3435)\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n random_index = np.random.permutation(np.arange(datasets[0].shape[0])) \n random_index.astype('int32')\n train_set = datasets[0][random_index,:]\n train_set_weights = datasets_weights[0][random_index,:,:]\n extra_data = train_set[:extra_data_num]\n extra_data_weights = train_set_weights[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_data_weights = np.append(datasets_weights[0],extra_data_weights,axis = 0)\n else:\n new_data = datasets[0]\n new_data_weights = datasets_weights[0]\n random_index = np.random.permutation(np.arange(new_data.shape[0])) \n random_index.astype('int32')\n new_data = new_data[random_index]\n new_data_weights = new_data_weights[random_index]\n n_batches = new_data.shape[0]/batch_size\n n_train_batches = int(np.round(n_batches*0.9))\n \n test_set_x = np.asarray(datasets[1][:,:img_h] ,\"float32\")\n test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:] ,\"float32\")\n test_set_y = np.asarray(datasets[1][:,-1],\"int32\")\n if use_valid_set:\n train_set = new_data[:n_train_batches*batch_size,:]\n train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:]\n val_set = new_data[n_train_batches*batch_size:,:]\n val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1]))\n val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h],val_set_weights,val_set[:,-1]))\n n_val_batches = n_batches - n_train_batches\n val_model = theano.function([index], classifier.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size]})\n else:\n train_set = new_data[:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) \n \n #make theano functions to get train/val/test errors\n test_model = theano.function([index], classifier.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]}) \n train_model = theano.function([index], cost, updates=grad_updates,\n givens={\n x: train_set_x[index*batch_size:(index+1)*batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size]}) \n test_pred_layers = []\n test_size = test_set_x.shape[0]\n \n \n\n test_layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1])) \n test_layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n test_layer0_inputs_topics.append(T.dot(weights, Topic))\n test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics,1)\n test_layer0_input_topics = test_layer0_input_topics.reshape((test_size,1,img_h,num_topics*topic_dim))\n test_layer0_input = T.concatenate([test_layer0_input_words,test_layer0_input_topics],3) \n\n\n\n for conv_layer in 
conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n test_y_pred = classifier.predict(test_layer1_input)\n\n test_error = T.mean(T.neq(test_y_pred, y))\n test_model_all = theano.function([x,x_topic,y], test_error) \n \n #start training over mini-batches\n print '... training'\n epoch = 0\n best_val_perf = 0\n val_perf = 0\n test_perf = 0 \n cost_epoch = 0 \n while (epoch < n_epochs): \n epoch = epoch + 1\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n if use_valid_set:\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1- np.mean(val_losses)\n\n if val_perf >= best_val_perf:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n best_val_perf = val_perf \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value()\n else :\n val_perf = 0 \n if show_states:\n print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.))\n \n if not use_valid_set:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value() \n \n return test_perf, [params_conv, params_output, word_vec,Topic_vec]", "def mgcNetArchNin(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=16, kernel_size=(5, 5), padding='valid', activation='relu')(img_shape)\n conv1 = layers.Conv2D(filters=16, kernel_size=(1, 1), activation='relu')(conv1)\n conv1 = layers.Conv2D(filters=16, kernel_size=(1, 1), activation='relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu')(conv1)\n conv2 = layers.Conv2D(filters=32, kernel_size=(1, 1), activation='relu')(conv2)\n conv2 = layers.Conv2D(filters=32, kernel_size=(1, 1), activation='relu')(conv2)\n conv2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(conv2)\n conv3 = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(conv3)\n 
conv3 = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(conv3)\n conv3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n\n # Layer 4\n #------------------------\n #conv4 = layers.Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu')(conv3)\n #conv4 = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu')(conv4)\n #conv4 = layers.Conv2D(filters=128, kernel_size=(1, 1), activation='relu')(conv4)\n #conv4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)\n #conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu')(conv3) # skip layer 4\n output = layers.Conv2D(filters=64, kernel_size=(1, 1), activation='relu')(output)\n output = layers.Conv2D(filters=32, kernel_size=(1, 1))(output)\n output = layers.MaxPooling2D(pool_size=(2, 2))(output)\n output = layers.Dropout(0.4)(output)\n\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(64, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'net_in_net')\n\n\n return model", "def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = 
Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n 
\n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, 
name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def to_yolov3(data, gt_bboxes, mode, save_path='yolov3_data'): \n save_path = join(save_path,mode)\n data_path = join(os.getcwd(),save_path,'data')\n \n if len(data)==1: \n if os.path.exists(data_path):\n if len(glob.glob(data_path+'/*.*')) == 2*sum([len(d) for _,d in data.items()]):\n print('Data already in YOLOv3 format!')\n return\n\n os.makedirs(data_path,exist_ok=True)\n\n for split, split_data in data[0].items():\n files = []\n for path in tqdm(split_data,'Preparing '+split+' data for YOLOv3'):\n # Convert to yolov3 format\n frame_id = basename(path).split('.')[0]\n lines_out = gt_multi_txt(path, gt_bboxes[frame_id])\n\n # Write/save files\n file_out = open(join(data_path,frame_id+'.txt'), 'w')\n file_out.writelines(lines_out)\n new_path = join(data_path,frame_id+'.jpg')\n files.append(new_path+'\\n')\n copyfile(path, new_path)\n\n split_txt = 
open(join(os.getcwd(),save_path,split+'.txt'), 'w')\n split_txt.writelines(files)\n else:\n for k, fold in enumerate(data):\n for split, split_data in fold.items():\n files = []\n for path in tqdm(split_data,'Preparing '+split+' data for YOLOv3'):\n # Convert to yolov3 format\n frame_id = basename(path).split('.')[0]\n new_path = join(data_path,frame_id+'.jpg')\n files.append(new_path+'\\n')\n \n # Write files\n os.makedirs(join(save_path,str(len(data))),exist_ok=True)\n split_txt = open(join(save_path,str(len(data)),split+'_'+str(k)+'.txt'), 'w')\n split_txt.writelines(files)", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n 
plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def classify_lenet5(batch_size=500, output_size=20):\n\n rng = numpy.random.RandomState(23455)\n\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 37, 23))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 37, 23),\n filter_shape=(20, 1, 4, 2),\n poolsize=(2, 2),\n )\n\n # layer1 = LeNetConvPoolLayer(\n # rng,\n # input=layer0.output,\n # image_shape=(batch_size, 20, 17, 11),\n # filter_shape=(50, 20, 4, 2),\n # poolsize=(2, 2),\n # )\n #\n # layer4 = LeNetConvPoolLayer(\n # rng,\n # input=layer1.output,\n # image_shape=(batch_size, 50, 7, 5),\n # filter_shape=(100, 50, 4, 2),\n # poolsize=(2, 2),\n # )\n\n layer2_input = layer0.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=3740,\n n_out=output_size,\n activation=T.tanh,\n use_bias=True\n )\n\n # layer5 = HiddenLayer(\n # rng,\n # input=layer2.output,\n # n_in=200,\n # n_out=output_size,\n # activation=T.tanh,\n # use_bias=True\n # )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=output_size, n_out=2)\n\n model_params = pickle.load(open('../model/cnn_dist_'+str(output_size)+'.pkl'))\n #\n layer0.W = theano.shared(\n value=numpy.array(\n model_params[2].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer0.b = theano.shared(\n value=numpy.array(\n model_params[3].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer1.W = theano.shared(\n # value=numpy.array(\n # model_params[-4].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer1.b = theano.shared(\n # value=numpy.array(\n # model_params[-3].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n #\n # layer4.W = theano.shared(\n # value=numpy.array(\n # model_params[-6].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer4.b = theano.shared(\n # value=numpy.array(\n # model_params[-5].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer2.W = theano.shared(\n value=numpy.array(\n model_params[0].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer2.b = theano.shared(\n value=numpy.array(\n model_params[1].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer5.W = theano.shared(\n # value=numpy.array(\n # model_params[-10].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer5.b = theano.shared(\n # value=numpy.array(\n # model_params[-9].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # 
borrow=True\n # )\n\n layer3.W = theano.shared(\n value=numpy.array(\n model_params[4].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer3.b = theano.shared(\n value=numpy.array(\n model_params[5].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # params = layer3.params + layer5.params + layer2.params + layer4.params + layer1.params + layer0.params\n\n datasets = load_data(None)\n\n sets = ['train', 'dev', 'test']\n dimension = [20000, 20000, 20000]\n for k in range(3):\n if k == 0:\n classify_set_x, classify_set_y, classify_set_z, classify_set_m, classify_set_c, classify_set_b= datasets[k]\n else:\n classify_set_x, classify_set_y, classify_set_z= datasets[k]\n\n # compute number of minibatches for training, validation and testing\n n_classify_batches = classify_set_x.get_value(borrow=True).shape[0]\n n_classify_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n classify = theano.function(\n [index],\n layer2.output,\n givens={\n x: classify_set_x[index * batch_size: (index + 1) * batch_size],\n }\n )\n\n r = []\n\n for i in xrange(n_classify_batches):\n m = classify(i)\n r.extend(m)\n r = np.array(r)\n print r.shape\n r = np.append(r, np.reshape(classify_set_y.eval(),(dimension[k], 1)), 1)\n numpy.savetxt('../extractedInformation/cnn_dist_'+str(output_size)+'/'+sets[k]+'.csv', r, delimiter=\",\")", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding 
box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def losd_cnn_svm_model(num_class=8, num_flow_channel=10, cnn_svm_model_id=1, feature_pool='max', device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")):\n# feature_dimension = 2048 # change this according to which layer's feature you want\n num_gpu_device = torch.cuda.device_count()\n\n # load spatial cnn net\n input_spatial_cnn_model_file = ''.join(['model/cnn_net/spatial_lucid_08_split_no_', str(cnn_svm_model_id).zfill(2), '_nfch_10_nepochs_500_lr_0.0005_ss_model_best.pth.tar'])\n print('loading spatial cnn trained model from: {}' .format(input_spatial_cnn_model_file))\n spatial_cnn_model = load_resnet_cnn_net_model(input_spatial_cnn_model_file, num_class=num_class, num_channel=3, device=device)\n print('----------------------------------------------------------------------------------------')\n\n # load motion cnn net\n input_motion_cnn_model_file = ''.join(['model/cnn_net/motion_lucid_08_split_no_', str(cnn_svm_model_id).zfill(2), '_nfch_10_nepochs_500_lr_0.01_ss_model_best.pth.tar'])\n print('loading motion cnn trained model from: {}' .format(input_motion_cnn_model_file))\n motion_cnn_model = load_resnet_cnn_net_model(input_motion_cnn_model_file, num_class=num_class, num_channel=2*num_flow_channel, device=device)\n print('----------------------------------------------------------------------------------------')\n\n # for svm model\n input_spatial_motion_svm_model_path = ''.join(['model/svm/spatial_motion_lucid_08_split_no_', str(cnn_svm_model_id).zfill(2), '_nfch_10_nepochs_500_slr_0.0005_mlr_0.01_network_resnet101_', str(feature_pool), '_pooling_thundersvm_1v1/'])\n input_spatial_motion_svm_model_file = ''.join([input_spatial_motion_svm_model_path, 'best_model_basedon_train_val_data_nclass_12_kernel_linear_int_c_0.1_max_c_1000.0_num_div_c_46_int_g_0.1_max_g_1.0_num_div_g_10'])\n print('loading spatial and motion net svm model from: \"{}\"' .format(input_spatial_motion_svm_model_file))\n spatial_motion_svm_model = thundersvm_load_svm_model(input_spatial_motion_svm_model_file)\n print('----------------------------------------------------------------------------------------')\n \n # model paralization (if you have multiple gpus)\n print('model will use \"{}\" GPUs' .format(num_gpu_device))\n if num_gpu_device > 1:\n spatial_cnn_model = nn.DataParallel(spatial_cnn_model)\n motion_cnn_model = nn.DataParallel(motion_cnn_model)\n \n return spatial_cnn_model, motion_cnn_model, spatial_motion_svm_model", "def train():\n if 
os.path.isfile(load_model):\n all_weights = np.load(load_model) \n else:\n print(\"Model file does not exist. Exiting....\")\n return\n\n print(\"Build up the network\")\n\n\n # Two different types of input\n image_input_var = T.tensor4('original_inputs')\n rotated_image_input_var = T.tensor4('rotated_image_input')\n target_var = T.ivector('targets')\n\n # Build teacher network\n cnn_model, cnn_mid_output, weight_decay_penalty = cifar10_merge.build_cnn(image_input_var)\n\n # Get the intermediate layer of the teacher network\n original_model_mid_output = lasagne.layers.get_output(cnn_mid_output, image_input_var, deterministic = True)\n\n # Get the softmax output of the teacher network.\n\n original_model_output_val = lasagne.layers.get_output(cnn_model, image_input_var, deterministic = True)\n \n # Build the student network\n \n rotated_cnn_model, rotated_model_mid, rotated_weight_penalty = \\\n cifar10_merge.build_cnn(rotated_image_input_var)\n \n # Get the softmax output of the student network. Since it need to be trained on, deterministic = False\n rotated_model_mid_output = lasagne.layers.get_output(rotated_model_mid, rotated_image_input_var, deterministic = False)\n\n # Get the model output of the studenet network.\n rotated_model_output = lasagne.layers.get_output(rotated_cnn_model, rotated_image_input_var, deterministic = True)\n\n # Set the weights for the teacher network\n lasagne.layers.set_all_param_values(cnn_model, all_weights)\n\n # Get the initialized weights below the intermediate layer\n rotated_net_weights_below_mid = lasagne.layers.get_all_param_values(rotated_model_mid)\n\n # Get the parameter of the student network that needs to be trained.\n rotated_net_training_param = lasagne.layers.get_all_params(rotated_model_mid, trainable=True)\n\n # Set the weights for the student network\n lasagne.layers.set_all_param_values(rotated_cnn_model, all_weights)\n\n lasagne.layers.set_all_param_values(rotated_model_mid,\n rotated_net_weights_below_mid)\n \n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(rotated_model_mid_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # L = T.mean(lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output), axis = 1)\n L = lasagne.objectives.squared_error(original_model_mid_output, rotated_model_mid_output).mean()\n # cost = T.mean(L)\n\n # cost = cross_entropy_loss_mean\n cost = L\n\n # updates = lasagne.updates.adagrad(cost, rotated_net_training_param, learning_rate=0.1)\n updates = lasagne.updates.adam(cost, rotated_net_training_param, learning_rate=0.001)\n\n # cross_entropy_loss = lasagne.objectives.categorical_crossentropy(model_output, target_var)\n\n # cross_entropy_loss_mean = cross_entropy_loss.mean()\n\n # loss = cross_entropy_loss_mean + weight_decay_penalty\n\n\n train_acc = T.mean(T.eq(T.argmax(rotated_model_output, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n original_model_acc = T.mean(T.eq(T.argmax(original_model_output_val, axis = 1), target_var),\n dtype=theano.config.floatX)\n\n train_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_mid_output, rotated_model_mid_output, train_acc], updates = updates)\n\n # Return the accuracy for teacher network and student network, respectively\n val_fn = theano.function(inputs = [image_input_var, rotated_image_input_var, target_var],\n outputs = [original_model_acc, train_acc])\n\n if os.path.isfile(os.path.join(train_dir, 
'latest_model.txt')):\n weight_file = \"\"\n with open(os.path.join(train_dir, 'latest_model.txt'), 'r') as checkpoint_file:\n weight_file = checkpoint_file.read().replace('\\n', '')\n print(\"Loading from: \", weight_file)\n model_weights = np.load(weight_file)\n lasagne.layers.set_all_param_values(rotated_cnn_model, model_weights)\n\n # Get images and labels for CIFAR-10.\n\n cifar10_data = cifar10_merge_input.load_cifar10()\n\n bkgimg = np.array([np.mean(cifar10_data.train.images[cifar10_data.train.labels==i], axis = 0) for i in range(10)])\n for epoch in xrange(max_steps):\n start_time = time.time()\n\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n total_t_net_for_original = 0\n total_s_net_for_original = 0\n total_t_net_for_rotation = 0\n total_s_net_for_rotation = 0\n total_count = 0\n\n print(\"Start Evaluating\")\n\n while(rotated_test_image is not None):\n t_net_for_original, s_net_for_original = val_fn(original_test_image, original_test_image, test_label)\n total_t_net_for_original += t_net_for_original * original_test_image.shape[0]\n total_s_net_for_original += s_net_for_original * original_test_image.shape[0]\n\n t_net_for_rotated, s_net_for_rotated = val_fn(rotated_test_image, rotated_test_image, test_label)\n total_t_net_for_rotation += t_net_for_rotated * rotated_test_image.shape[0]\n total_s_net_for_rotation += s_net_for_rotated * rotated_test_image.shape[0]\n\n total_count += rotated_test_image.shape[0]\n original_test_image, rotated_test_image, test_label = cifar10_data.test.next_eval_batch(batch_size)\n \n print(\"Student Network Accuracy on Original Image: %.4f\" % (float(total_s_net_for_original / total_count)))\n print(\"Teacher Network Accuracy on Original Image: %.4f\" % (float(total_t_net_for_original / total_count)))\n\n print(\"Student Network Accuracy on Rotated Image: %.4f\" % (float(total_s_net_for_rotation / total_count)))\n print(\"Teacher Network Accuracy on Rotated Image: %.4f\" % (float(total_t_net_for_rotation / total_count)))\n\n\n print(\"Start Training...\")\n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n # rotated_train_image = random_rotated_image(original_train_image[::-1])\n rotated_train_image = random_rotated_image(original_train_image)\n\n end_time_1 = time.time() - start_time\n step = 1\n loss_total = 0\n original_start = start\n\n while(start != 0):\n #loss_value, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n \n ori_mid, rot_mid, train_acc = train_fn(original_train_image, rotated_train_image, train_label)\n # ori_mid, rot_mid, train_acc = train_fn(original_train_image, np.array(np.random.rand(batch_size, 3, 32, 32), dtype = np.float32), train_label)\n step += 1\n if start == original_start:\n print(ori_mid[0])\n print(rot_mid[0])\n print(train_label)\n \n original_train_image, rotated_train_image, train_label, start = cifar10_data.train.next_batch(batch_size)\n original_train_image = generate(original_train_image, train_label, bkgimg, 16, 32, None, batch_size)\n rotated_train_image = random_rotated_image(original_train_image)\n # assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n # loss_total += loss_value\n if 1:\n if epoch % 100 == 0 or (step + 1) == max_steps:\n checkpoint_path = os.path.join(train_dir, 'model_step%d.npy' % epoch)\n weightsOfParams = 
lasagne.layers.get_all_param_values(rotated_cnn_model)\n np.save(checkpoint_path, weightsOfParams)\n latest_model_path = os.path.join(train_dir, 'latest_model.txt')\n try:\n os.remove(latest_model_path)\n except OSError:\n pass\n latest_model_file = open(latest_model_path, \"w\")\n latest_model_file.write(checkpoint_path)\n latest_model_file.close()\n\n # print(\"Epoch Stop, loss_averge\", float(loss_total) / float(step))\n duration = time.time() - start_time\n print(\"Duration is\", duration)", "def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Rand, self).__init__()\r\n print(\"CIFAR VGG16_Rand is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n self.bias = True\r\n\r\n # Define the building blocks\r\n if layer <= 11:\r\n self.conv11 = CONV_3x3rand(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 12:\r\n self.conv12 = nn.Sequential(CONV_3x3rand(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 21:\r\n self.conv21 = CONV_3x3rand(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 22:\r\n self.conv22 = nn.Sequential(CONV_3x3rand(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 31:\r\n self.conv31 = CONV_3x3rand(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 32:\r\n self.conv32 = CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 33:\r\n self.conv33 = nn.Sequential(CONV_3x3rand(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 41:\r\n self.conv41 = CONV_3x3rand(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 42:\r\n self.conv42 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 43:\r\n self.conv43 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n if layer <= 51:\r\n self.conv51 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n 
self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 52:\r\n self.conv52 = CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias)\r\n\r\n if layer <= 53:\r\n self.conv53 = nn.Sequential(CONV_3x3rand(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=self.bias),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)", "def comprehensiveLOOEvaluation(directory=defaultDirectory(), \n pruneGlobal = True, numLayers = 2, \n numNodesPerLayer = 200, randSeed = 1,\n trainPer = .6, valPer = .25, testPer = 0.15,\n totalPer = 1, batchSize = 64,\n numEpochs = 1000, learningRate = 0.001, \n l2Reg = 0.0001, modelFilePrefix = '',\n useGRU = False,\n dropoutI = 0.2, dropoutH = 0.2, trainMode = 'continue',\n randSeed2 = None, center = False, prependMean = False):\n \n trainModes = ['continue', 'overwrite', 'continue-each']\n \n if trainMode.lower() not in trainModes:\n raise ValueError(\"Parameter 'trainMode' must be either 'continue', 'overwrite', or 'continue-each'.\")\n \n np.random.seed(randSeed) #control permutation of data\n # prune global coordinate data?\n if pruneGlobal:\n pruneRange = range(0, 18)\n else:\n pruneRange = None\n structs = loadDataset(directory=directory, LOUO=True, \n delRange=pruneRange, trainPer=trainPer,\n valPer = valPer, testPer=testPer, totalPer=totalPer,\n preExt = '.left', prune=True)\n\n u=0\n losses = []\n accs = []\n balAccs = []\n finAccs = []\n cmEpochs = []\n outDirectory = nameModelFile('', useGRU, numLayers, numNodesPerLayer, randSeed,\n trainPer, valPer, testPer, totalPer, dropoutI, dropoutH, l2Reg,\n center, prependMean)\n if not os.path.isdir(outDirectory):\n os.mkdir(outDirectory)\n if randSeed2 is not None: #control randomization of training (for Keras at least)\n np.random.seed(randSeed2)\n for struct in structs:\n modelFile = modelFilePrefix + 'LOU-' + str(u)\n modelFile = nameModelFile(modelFile, useGRU, numLayers, numNodesPerLayer, randSeed,\n trainPer, valPer, testPer, totalPer, dropoutI, dropoutH, l2Reg,\n center, prependMean)\n u += 1\n if (os.path.isfile(outDirectory + '\\\\' + 'Keras' + modelFile + '.json') \n and os.path.isfile(outDirectory + '\\\\' + 'Keras' + modelFile + '_Weights.h5')):\n #if we have already trained for leaving out this user\n if trainMode == 'continue': #continue until each user has a model\n trainMode2 = 'skip' \n elif trainMode == 'continue-each': # continue training previous models\n trainMode2 = 'continue'\n else:\n trainMode2 = 'overwrite'\n else:\n trainMode2 = trainMode\n\n if center:\n \"\"\"\n Center the labeled markers on their mean. 
\n \"\"\"\n from Postures import centerData\n struct = list(struct)\n labeledMarkerData = struct[0][:,:,18:].reshape((-1, 11, 3))\n labeledMarkerData = centerData(labeledMarkerData, True, prependMean).reshape((struct[0].shape[0], struct[0].shape[1], -1))\n struct[0] = np.concatenate([struct[0][:,:,0:18], labeledMarkerData], axis = 2)\n if prependMean:\n struct[8] += 3\n\n\n cmEpoch, loss, acc, balAcc, finAcc = trainGestureRNN(numLayers=numLayers, numNodesPerLayer=numNodesPerLayer,\n useGRU=useGRU, batchSize=batchSize, \n numEpochs = numEpochs, learningRate=learningRate,\n l1Reg=0, l2Reg = l2Reg, dropoutI=dropoutI, dropoutH=dropoutH,\n sequences = struct[0], classes = struct[1],\n trainRange = struct[2], valRange = struct[3],\n testRange = struct[4], numClasses = struct[5],\n numObservations = struct[6], numSequences = struct[7],\n numFeatures = struct[8],\n modelFile=modelFile, \n outDirectory=outDirectory, trainMode=trainMode2,\n callbacks = [EarlyStopping(patience=20)])\n #catch our breath.... Really, give the user a chance to insert Ctrl-C\n time.sleep(2)\n losses += [loss]\n accs += [acc]\n balAccs += [balAcc]\n finAccs += [finAcc]\n cmEpochs += [cmEpoch]\n losses = np.asarray(losses)\n accs = np.asarray(accs)*100\n balAccs = np.asarray(balAccs)*100\n finAccs = np.asarray(finAccs)*100\n trainPer, valPer, _, _ = normalizePercentages(trainPer, valPer, 0, 1)\n sys.stdout.write('\\n')\n sys.stdout.write('Leave One User Out Evaluation\\nTest Results for ' + str(numLayers) + '-Layer, ' \n + str(numNodesPerLayer) + ' Nodes-Per-Layer ' + ('GRU' if useGRU else 'LSTM') + ' Networks\\n'\n + 'Trained with ' + (\"%0.2f\" % (dropoutI*100)) + '% Input Dropout, '\n + (\"%0.2f\" % (dropoutH*100)) + '% Hidden Dropout, and ' + str(l2Reg) + ' L2 Regularization\\n'\n + str(numEpochs) + ' Maximum Epochs at ' + (\"%0.2f\" % trainPer) + '/' + (\"%0.2f\" % valPer) + ' Training/Validation Split\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Loss: ' + str(np.mean(losses)) + ' +/- ' + str(np.std(losses)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Loss: ' + str(np.percentile(losses, 25))\n + ', ' + str(np.median(losses)) \n + ', ' + str(np.percentile(losses, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Accuracy: ' + str(np.mean(accs)) + ' +/- ' + str(np.std(accs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Accuracy: ' + str(np.percentile(accs, 25))\n + ', ' + str(np.median(accs)) \n + ', ' + str(np.percentile(accs, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Balanced Accuracy: ' + str(np.mean(balAccs)) + ' +/- ' + str(np.std(balAccs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Balanced Accuracy: ' + str(np.percentile(balAccs, 25))\n + ', ' + str(np.median(balAccs))\n + ', ' + str(np.percentile(balAccs, 75)) +'\\n')\n sys.stdout.write('\\n')\n sys.stdout.write('Final-Frame Accuracy: ' + str(np.mean(finAccs)) + ' +/- ' + str(np.std(finAccs)) +'\\n')\n sys.stdout.write('25%, 50%, 75% Quartile Final-Frame Accuracy: ' + str(np.percentile(finAccs, 25))\n + ', ' + str(np.median(finAccs))\n + ', ' + str(np.percentile(finAccs, 75)) +'\\n')", "def test_net(model, val_loader=None, thresh=0.05):\n\n for iter, data in enumerate(val_loader):\n\n # one batch = data for one image\n image = data['image']\n target = data['label']\n wgt = data['wgt']\n rois = data['rois']\n gt_boxes = data['gt_boxes']\n gt_class_list = data['gt_classes']\n\n #TODO: perform forward pass, compute cls_probs\n\n\n # TODO: Iterate over each class (follow comments)\n for class_num in range(20): \n # 
get valid rois and cls_scores based on thresh\n \n # use NMS to get boxes and scores\n \n\n #TODO: visualize bounding box predictions when required\n #TODO: Calculate mAP on test set", "def mobilenetv3_large(**kwargs):\n cfgs = [\n # k, t, c, SE, HS, s \n [3, 1, 16, 0, 0, 1],\n [3, 4, 24, 0, 0, 2],\n [3, 3, 24, 0, 0, 1],\n [5, 3, 40, 1, 0, 2],\n [5, 3, 40, 1, 0, 1],\n [5, 3, 40, 1, 0, 1],\n [3, 6, 80, 0, 1, 2],\n [3, 2.5, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 2.3, 80, 0, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [3, 6, 112, 1, 1, 1],\n [5, 6, 160, 1, 1, 2],\n [5, 6, 160, 1, 1, 1],\n [5, 6, 160, 1, 1, 1]\n ]\n return MobileNetV3(cfgs, mode='large', **kwargs)", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def __init__(self):\n super(Match3DNet, self).__init__()\n self.features = nn.Sequential(\n # conv1\n nn.Conv3d(1, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv2\n nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # maxpool\n nn.MaxPool3d(kernel_size=2, stride=2),\n # conv3\n nn.Conv3d(64, 128, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv4\n nn.Conv3d(128, 128, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv5\n nn.Conv3d(128, 256, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv6\n nn.Conv3d(256, 256, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n # conv7\n nn.Conv3d(256, 512, kernel_size=3, stride=1, 
padding=0),\n nn.ReLU(inplace=True),\n # conv8\n nn.Conv3d(512, 512, kernel_size=3, stride=1, padding=0),\n nn.ReLU(inplace=True),\n )", "def second_nms(input, threshold):\r\n\r\n # Identify Class Net and Box Net head names.\r\n second_head_names = ['StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_class_head/ClassPredictor_dense',\r\n 'StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_box_head/BoxEncodingPredictor_dense']\r\n\r\n # Find the softmax node at the end of the 2nd class net (multi-scale class predictor).\r\n second_class_net = find_head_end(second_head_names[0], \"MatMul\", \"Softmax\")\r\n\r\n # Faster R-CNN's slice operation to adjust third dimension of Class Net's last node tensor (adjusting class values).\r\n slice_start = np.asarray([1], dtype=np.int64)\r\n slice_end = np.asarray([91], dtype=np.int64)\r\n # Second list element but third tensor dimension.\r\n slice_axes = np.asarray([2], dtype=np.int64)\r\n slice_out = self.graph.elt_const_slice(\"Slice\", second_head_names[0]+\"/slicer\", second_class_net.outputs[0], slice_start, slice_end, slice_axes)\r\n\r\n # Final Class Net tensor.\r\n second_class_net_tensor = slice_out[0]\r\n \r\n # Find the add node at the end of the box net (multi-scale localization predictor).\r\n second_box_net = find_head_end(second_head_names[1], \"MatMul\", \"Add\")\r\n # Final Box Net tensor.\r\n second_box_net_output = second_box_net.outputs[0]\r\n\r\n # Reshape node that is preparing second_box_net_output's output shape for Mul scaling node that comes next.\r\n reshape_shape_second = np.asarray([self.batch_size, self.first_stage_max_proposals, second_box_net.outputs[0].shape[1]], dtype=np.int64)\r\n reshape_node_second = self.graph.elt_const(\"Reshape\", second_head_names[1]+\"/reshape\", second_box_net_output, reshape_shape_second)\r\n # 0.1, 0.1, 0.2, 0.2 are localization head variance numbers, they scale second_box_net_output, in order to get accurate coordinates.\r\n second_scale_adj = np.expand_dims(np.asarray([0.1, 0.1, 0.2, 0.2], dtype=np.float32), axis=(0, 1))\r\n second_scale_out = self.graph.elt_const(\"Mul\", second_head_names[1]+\"/scale_second\", reshape_node_second[0], second_scale_adj)\r\n\r\n # Final Box Net tensor.\r\n second_box_net_tensor = second_scale_out[0]\r\n\r\n # Set score threshold\r\n score_threshold = self.second_score_threshold if threshold is None else threshold\r\n\r\n # NMS Inputs and Attributes\r\n # NMS expects these shapes for its input tensors:\r\n # box_net: [batch_size, number_boxes, 4]\r\n # class_net: [batch_size, number_boxes, number_classes]\r\n # anchors: [1, number_boxes, 4] (if used)\r\n second_nms_op = None\r\n second_nms_attrs = None\r\n second_nms_inputs = None\r\n\r\n # EfficientNMS TensorRT Plugin is suitable for our use case.\r\n # Fusing the decoder will always be faster, so this is the default NMS method supported. 
In this case,\r\n # three inputs are given to the NMS TensorRT node:\r\n # - The box predictions (from the Box Net node found above)\r\n # - The class predictions (from the Class Net node found above)\r\n # - The default anchor coordinates (from the extracted anchor constants)\r\n # As the original tensors from given model will be used, the NMS code type is set to 1 (Center+Size),\r\n # because this is the internal box coding format used by the network.\r\n second_nms_inputs = [second_box_net_tensor, second_class_net_tensor, input]\r\n second_nms_op = \"EfficientNMS_TRT\"\r\n second_nms_attrs = {\r\n 'plugin_version': \"1\",\r\n 'background_class': -1,\r\n 'max_output_boxes': self.first_stage_max_proposals,\r\n 'score_threshold': max(0.01, score_threshold),\r\n 'iou_threshold': self.second_iou_threshold,\r\n 'score_activation': False,\r\n 'box_coding': 1,\r\n }\r\n second_nms_output_classes_dtype = np.int32\r\n\r\n # NMS Outputs.\r\n second_nms_output_num_detections = gs.Variable(name=\"second_num_detections\", dtype=np.int32, shape=[self.batch_size, 1])\r\n second_nms_output_boxes = gs.Variable(name=\"second_detection_boxes\", dtype=np.float32,\r\n shape=[self.batch_size, self.first_stage_max_proposals, 4])\r\n second_nms_output_scores = gs.Variable(name=\"second_detection_scores\", dtype=np.float32,\r\n shape=[self.batch_size, self.first_stage_max_proposals])\r\n second_nms_output_classes = gs.Variable(name=\"second_detection_classes\", dtype=second_nms_output_classes_dtype,\r\n shape=[self.batch_size, self.first_stage_max_proposals])\r\n\r\n second_nms_outputs = [second_nms_output_num_detections, second_nms_output_boxes, second_nms_output_scores, second_nms_output_classes]\r\n\r\n # Create the NMS Plugin node with the selected inputs. \r\n self.graph.plugin(\r\n op=second_nms_op,\r\n name=\"nms/non_maximum_suppression_second\",\r\n inputs=second_nms_inputs,\r\n outputs=second_nms_outputs,\r\n attrs=second_nms_attrs)\r\n log.info(\"Created NMS plugin '{}' with attributes: {}\".format(second_nms_op, second_nms_attrs))\r\n \r\n # Set graph outputs.\r\n self.graph.outputs = second_nms_outputs\r\n\r\n self.infer()", "def build_model_multihead_attention_multiscaleCNN4_covermore(self,\n dim_attention,headnum,\n embedding_vec,\n load_weights=False, weight_dir=None,\n nb_filters=32,filters_length1=1,\n filters_length2=5,\n filters_length3=10,\n pooling_size=3,\n drop_input=0,\n drop_cnn=0.2,\n drop_flat=0,\n W1_regularizer=0.005,\n W2_regularizer=0.005,\n Att_regularizer_weight=0.0005,\n BatchNorm=False,\n fc_dim = 50,\n fcnum=0,\n posembed=False,\n pos_dmodel=40,\n pos_nwaves = 20,\n posmod = 'concat',\n regularfun=1,\n huber_delta=1,\n activation='gelu',\n activationlast='gelu',\n add_avgpooling = False,\n poolingmod=1,\n normalizeatt=False,\n regressionmodel=False,\n attmod = \"softmax\",\n sharp_beta=1,\n lr = 0.001 \n ):\n ###print('Advanced Masking')\n def mask_func(x):\n return x[0] * x[1]\n \n ###print(posembed)\n ###print(posmod)\n input = Input(shape=(self.max_len,), dtype='int8')\n input_mask = Input(shape=([int(self.max_len/pooling_size), 1]), dtype='float32')\n embedding_layer = Embedding(len(embedding_vec), len(embedding_vec[0]), weights=[embedding_vec],\n input_length=self.max_len,\n trainable=False)\n embedding_output = Dropout(drop_input)(embedding_layer(input)) #layer2\n if 'gelu' in activation:\n activationfun=gelu\n else:\n activationfun = 'relu'\n \n if 'gelu' in activationlast:\n activationlastfun = gelu\n else:\n activationlastfun='relu'\n \n ###print(activationfun)\n 
###print(activationlastfun)\n with tf.name_scope('first_cnn'):\n first_cnn = Convolution1D(nb_filters, filters_length1, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN1')(embedding_output) #layer3\n first_cnn2 = Convolution1D(int(nb_filters/2), filters_length1, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(first_cnn) #layer5\n second_cnn = Convolution1D(nb_filters, filters_length2, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN2')(embedding_output) #layer4\n second_cnn2 = Convolution1D(int(nb_filters/2), filters_length2, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(second_cnn)\n third_cnn = Convolution1D(int(nb_filters/2), filters_length3, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationfun, use_bias=False,name='CNN3')(embedding_output)\n \n third_cnn2 = Convolution1D(int(nb_filters/2), filters_length3, #kernel_regularizer=regularizers.l2(0.0001),\n border_mode='same', activation=activationlastfun, use_bias=False)(third_cnn)\n if BatchNorm:\n first_cnn2 = BatchNormalization()(first_cnn2)\n second_cnn2 = BatchNormalization()(second_cnn2)\n third_cnn2 = BatchNormalization()(third_cnn2)\n \n if not add_avgpooling:\n if poolingmod == 1:\n pooling_layer = MaxPooling1D(pool_length=pooling_size, stride=pooling_size)\n else:\n pooling_layer = AveragePooling1D(pool_length=pooling_size, stride=pooling_size)\n \n cnn_output1 = Dropout(drop_cnn)(pooling_layer(first_cnn2))\n cnn_output2 = Dropout(drop_cnn)(pooling_layer(second_cnn2))\n cnn_output3 = Dropout(drop_cnn)(pooling_layer(third_cnn2))\n else:\n first_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(first_cnn2)\n first_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(first_cnn2)\n cnn_output1 = Dropout(drop_cnn)(concatenate([first_cnn2_max,first_cnn2_avg],axis=-1))\n second_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(second_cnn2)\n second_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(second_cnn2)\n cnn_output2 = Dropout(drop_cnn)(concatenate([second_cnn2_max,second_cnn2_avg],axis=-1))\n third_cnn2_max=MaxPooling1D(pool_length=pooling_size, stride=pooling_size)(third_cnn2)\n third_cnn2_avg=AveragePooling1D(pool_length=pooling_size, stride=pooling_size)(third_cnn2)\n cnn_output3 = Dropout(drop_cnn)(concatenate([third_cnn2_max,third_cnn2_avg],axis=-1))\n \n \n \n if posembed:\n ##print(posmod)\n from position_embedding import PositionEmbedding\n if posmod == 'concat':\n pos_emb1 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb1')(cnn_output1)\n cnn_output1 = concatenate([cnn_output1, pos_emb1], axis=-1)\n pos_emb2 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb2')(cnn_output2)\n cnn_output2 = concatenate([cnn_output2, pos_emb2], axis=-1)\n pos_emb3 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=pos_nwaves, d_model=pos_dmodel,name='pos_emb3')(cnn_output3)\n cnn_output3 = concatenate([cnn_output3, pos_emb3], axis=-1)\n else:\n ##print(\"yes add posmod\")\n pos_emb1 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output1)[-1]/2), 
d_model=int_shape(cnn_output1)[-1],name='pos_emb1')(cnn_output1)\n cnn_output1 = Add()([cnn_output1, pos_emb1])\n pos_emb2 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output2)[-1]/2), d_model=int_shape(cnn_output2)[-1],name='pos_emb2')(cnn_output2)\n cnn_output2 = Add()([cnn_output2, pos_emb2])\n pos_emb3 = PositionEmbedding(max_time=int(self.max_len/pooling_size), n_waves=int(int_shape(cnn_output3)[-1]/2), d_model=int_shape(cnn_output3)[-1],name='pos_emb3')(cnn_output3)\n cnn_output3 = Add()([cnn_output3, pos_emb3])\n \n mask_input1 = []\n mask_input1.append(cnn_output1)\n mask_input1.append(input_mask)\n cnn_mask_output1 = Lambda(mask_func)(mask_input1)\n del mask_input1\n mask_input2 = []\n mask_input2.append(cnn_output2)\n mask_input2.append(input_mask)\n cnn_mask_output2 = Lambda(mask_func)(mask_input2)\n del mask_input2\n mask_input3 = []\n mask_input3.append(cnn_output3)\n mask_input3.append(input_mask)\n cnn_mask_output3 = Lambda(mask_func)(mask_input3)\n del mask_input3\n \n if regularfun==1:\n regularizerfunction_W1 = regularizers.l1(W1_regularizer)\n regularizerfunction_W2 = regularizers.l1(W2_regularizer)\n elif regularfun==2:\n regularizerfunction_W1 = regularizers.l2(W1_regularizer)\n regularizerfunction_W2 = regularizers.l2(W2_regularizer)\n elif regularfun ==3:\n regularizerfunction_W1 = smoothL1(W1_regularizer,huber_delta)\n regularizerfunction_W2 = smoothL1(W2_regularizer,huber_delta)\n \n with tf.name_scope('multihead_attention'):\n att1,att1_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att1\")(concatenate([cnn_mask_output1, input_mask]))#-5 layer\n \n att2,att2_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att2\")(concatenate([cnn_mask_output2, input_mask])) #-4 layer\n \n att3,att3_A = Attention_mask(hidden=cnn_output1.get_shape()[-1].value, da=dim_attention, r=headnum, init='glorot_uniform', activation='tanh',\n W1_regularizer=regularizerfunction_W1,\n W2_regularizer=regularizerfunction_W2,\n W1_constraint=None, W2_constraint=None, return_attention=True,\n attention_regularizer_weight=Att_regularizer_weight,normalize=normalizeatt,attmod=attmod,sharp_beta=sharp_beta,name=\"att3\")(concatenate([cnn_mask_output3, input_mask])) #-3 layer\n \n if BatchNorm:\n att1 = BatchNormalization()(att1)\n att2 = BatchNormalization()(att2)\n att3 = BatchNormalization()(att3)\n \n \n output = Dropout(drop_flat)(Flatten()(concatenate([att1,att2,att3]))) #-2 layer\n \n fc = output\n for _ in range(fcnum):\n fc = Dense(fc_dim,activation='relu')(fc)\n fc = Dropout(drop_flat)(fc)\n \n with tf.name_scope(''):\n if regressionmodel:\n preds = Dense(self.nb_classes,activation='softmax')(fc) #-1 layer\n else:\n preds = Dense(self.nb_classes,activation='sigmoid')(fc) #-1 layer\n \n self.model = Model(inputs=[input,input_mask], outputs=preds)\n from keras import 
optimizers\n # optim = optimizers.RMSprop()\n optim = optimizers.Adam(lr=lr, decay=5e-5) #The paper uses a decay rate alpha = alpha/sqrt(t) updted each epoch (t) for the logistic regression demonstration.\n #optim = optimizers.nadam()\n #optim = RAdam()\n if regressionmodel:\n self.model.compile(loss='kld',optimizer=optim,metrics=['acc'])\n else:\n self.model.compile(loss='binary_crossentropy',optimizer=optim,metrics=['binary_accuracy','categorical_accuracy'])\n \n \n \n if load_weights:\n self.model.load_weights(weight_dir)\n \n self.is_built = True\n self.bn = False\n self.model.summary()", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n 
cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = (2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs", "def __init__(self):\n super(Decoder_1m, self).__init__()\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n 
),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n 
nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )", "def train_cavia(args, config, path_model, device):\n global DATA, PATH_BASE, PATH_DATA, N_CLS, NAME_DATA, NAME_BACKBONE\n\n if path_model is None:\n masks_dict = None\n learner = None\n else:\n print('Load net from', path_model)\n save_dict = 
torch.load(path_model)\n state_dict = save_dict['state_dict']\n masks_dict = save_dict.get('masks_dict', None)\n\n learner = Learner(config, args.num_context_params, args.context_in)\n learner.load_state_dict(state_dict)\n\n if masks_dict is None:\n print('masks_dict is None!')\n cr = 1.\n else:\n cr = get_cr(learner, masks_dict)\n\n cavia = Meta(args, config, learner).to(device)\n\n # save path\n if path_model is None:\n path_save = '%s/model/cavia_lobs/%s_%s/%d-way_%d-shot' % (\n PATH_BASE, NAME_DATA, NAME_BACKBONE, args.n_way, args.k_spt)\n else:\n path_save = '/'.join(path_model.split('/')[:-1])\n if not os.path.exists(path_save):\n os.makedirs(path_save)\n\n # meta train\n meta_train(cavia, args, masks_dict, device,\n output=args.log_print,\n save=args.save_each_epoch,\n save_threshold=args.save_threshold,\n test_each_epoch=args.test_each_epoch,\n path_save=path_save,\n cr=cr,\n break_threshold=args.break_threshold)\n\n if not args.save_each_epoch:\n # meta test\n acc_avg, accs = meta_test(cavia, args, masks_dict, device)\n\n path_model = '%s/net_cr-1_acc-%.4f.pkl' % (path_save, acc_avg)\n print('Save as %s' % path_model)\n torch.save(\n {\n 'state_dict': cavia.net.state_dict(),\n 'accs': accs,\n 'config': config,\n 'args': args\n },\n path_model\n )", "def build(classes):\n # data input\n data = mx.sym.Variable(\"data\")\n\n # Block #1: first CONV => RELU => POOL layer set\n conv1_1 = mx.sym.Convolution(data=data, kernel=(11, 11), stride=(4, 4), num_filter=96)\n act1_1 = mx.sym.LeakyReLU(data=conv1_1, act_type=\"elu\")\n bn1_1 = mx.sym.BatchNorm(data=act1_1)\n pool1 = mx.sym.Pooling(data=bn1_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do1 = mx.sym.Dropout(data=pool1, p=0.25)\n\n # Block #2: second CONV => RELU => POOL layer set\n conv2_1 = mx.sym.Convolution(data=do1, kernel=(5, 5), pad=(2, 2), num_filter=256)\n act2_1 = mx.sym.LeakyReLU(data=conv2_1, act_type=\"elu\")\n bn2_1 = mx.sym.BatchNorm(data=act2_1)\n pool2 = mx.sym.Pooling(data=bn2_1, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do2 = mx.sym.Dropout(data=pool2, p=0.25)\n\n # Block #3: (CONV => RELU) * 3 => POOL\n conv3_1 = mx.sym.Convolution(data=do2, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_1 = mx.sym.LeakyReLU(data=conv3_1, act_type=\"elu\")\n bn3_1 = mx.sym.BatchNorm(data=act3_1)\n conv3_2 = mx.sym.Convolution(data=bn3_1, kernel=(3, 3), pad=(1, 1), num_filter=384)\n act3_2 = mx.sym.LeakyReLU(data=conv3_2, act_type=\"elu\")\n bn3_2 = mx.sym.BatchNorm(data=act3_2)\n conv3_3 = mx.sym.Convolution(data=bn3_2, kernel=(3, 3), pad=(1, 1), num_filter=256)\n act3_3 = mx.sym.LeakyReLU(data=conv3_3, act_type=\"elu\")\n bn3_3 = mx.sym.BatchNorm(data=act3_3)\n pool3 = mx.sym.Pooling(data=bn3_3, pool_type=\"max\", kernel=(3, 3), stride=(2, 2))\n do3 = mx.sym.Dropout(data=pool3, p=0.25)\n\n # Block #4: first set of FC => RELU layers\n flatten = mx.sym.Flatten(data=do3)\n fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=4096)\n act4_1 = mx.sym.LeakyReLU(data=fc1, act_type=\"elu\")\n bn4_1 = mx.sym.BatchNorm(data=act4_1)\n do4 = mx.sym.Dropout(data=bn4_1, p=0.5)\n\n # Block #5: second set of FC => RELU layers\n fc2 = mx.sym.FullyConnected(data=do4, num_hidden=4096)\n act5_1 = mx.sym.LeakyReLU(data=fc2, act_type=\"elu\")\n bn5_1 = mx.sym.BatchNorm(data=act5_1)\n do5 = mx.sym.Dropout(data=bn5_1, p=0.5)\n\n # softmax classifier\n fc3 = mx.sym.FullyConnected(data=do5, num_hidden=classes)\n model = mx.sym.SoftmaxOutput(data=fc3, name=\"softmax\")\n\n # return the network architecture\n return model", "def 
lenet300_classic():\n return LeNet300(dropout=False, nonlinearity=nn.Tanh)", "def neural_net_ex4_ng():\n # ==================\n # read data\n dataset = loadmat('data/ex4data1.mat')\n print(dataset.keys())\n\n y = dataset['y'] # 5000 x 1\n print('dims y: ', y.shape)\n # print('y[0]: ', y[0])\n\n X = dataset['X'] # 5000 x 400\n print('dims X: ', X.shape)\n # print('X[0]: ', X[0])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n num_samples_test = X_test.shape[0]\n\n # ==================\n # display data\n\n # pick 20 examples and visualize them\n fig = plt.figure(figsize=(10, 8), facecolor='white')\n fig.add_subplot(651)\n samples = np.random.choice(num_samples_test, 10)\n print('samples:', samples)\n plt.imshow(X_test[samples, :].reshape(-1, 20).T, cmap=\"Greys\")\n plt.axis('off')\n\n # ==================\n # run neural net\n hidden_layer_size = 25\n\n mlp = MLPClassifier(hidden_layer_sizes=(25,), max_iter=20, alpha=1e-4,\n solver='sgd', verbose=False, tol=1e-4, random_state=1,\n learning_rate_init=.1)\n mlp.fit(X_train, y_train.ravel())\n\n predictions = mlp.predict(X_test)\n print('Test set accuracy: {} %'.format(np.mean(predictions == y_test.ravel())*100))\n\n # print(confusion_matrix(y_test, predictions))\n # print(classification_report(y_test, predictions))\n print(\"Training set score: %f\" % mlp.score(X_train, y_train))\n print(\"Test set score: %f\" % mlp.score(X_test, y_test))\n print('coeffs shape', (mlp.coefs_[0]).shape)\n\n # ==================\n # display coefficients of hidden layer\n fig.add_subplot(652)\n plt.imshow(mlp.coefs_[0][:, 0].reshape(20, 20))\n plt.axis('off')\n\n gs = gridspec.GridSpec(6, 5)\n cur_img_idx = 5\n\n # use global min / max to ensure all weights are shown on the same scale\n vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\n for coef, ax in zip(mlp.coefs_[0].T, range(hidden_layer_size)):\n fig.add_subplot(gs[cur_img_idx])\n plt.imshow(coef.reshape(20, 20), cmap=plt.cm.gray, vmin=.5 * vmin, vmax=.5 * vmax)\n plt.axis('off')\n cur_img_idx += 1\n\n plt.show()", "def multiview_consistency(y_true, y_pred):\n alpha = 1 # 0.0001 # The weight on the multiview loss, which we hard-code for now..\n\n msk_loss = mask_nan_keep_loss(y_true[-1], y_pred[-1])\n\n # The output should be (batch_size,nvox,nvox,nvox,n_markers)\n # For a 3-cam system, there are only 3 different possible pairs, so we discard the last, which is the complete set\n y_pred_ = y_pred[:-1]\n y_pred_diff = y_pred_[1:] - y_pred_[:-1]\n multiview_loss = K.mean(K.flatten(y_pred_diff) ** 2)\n\n return msk_loss + alpha * multiview_loss", "def train_tgt(src_encoder, tgt_encoder, critic,\r\n src_data_loader, tgt_data_loader, classifier, tgt_test_data_loader):\r\n\r\n ####################\r\n # 1. setup network #\r\n ####################\r\n\r\n # set train state for Dropout and BN layers\r\n src_encoder.eval()\r\n tgt_encoder.train()\r\n critic.train()\r\n\r\n # setup criterion and optimizer\r\n criterion = nn.CrossEntropyLoss()\r\n optimizer_tgt = optim.Adam(tgt_encoder.parameters(),\r\n lr=params.c_learning_rate,\r\n betas=(params.beta1, params.beta2))\r\n optimizer_critic = optim.Adam(critic.parameters(),\r\n lr=params.d_learning_rate,\r\n betas=(params.beta1, params.beta2))\r\n # len_data_loader = min(len(src_data_loader), len(tgt_data_loader))\r\n\r\n ####################\r\n # 2. 
train network #\r\n ####################\r\n #test_model(src_encoder, classifier, tgt_test_data_loader)\r\n #test_model(tgt_encoder, classifier, tgt_test_data_loader)\r\n for epoch in range(params.num_epochs):\r\n # zip source and target data pair\r\n data_zip = enumerate(zip(src_data_loader, tgt_data_loader))\r\n for step, ((images_src, _), (images_tgt, _)) in data_zip:\r\n ###########################\r\n # 2.1 train discriminator #\r\n ###########################\r\n\r\n # make images variable\r\n images_src = images_src.cuda()\r\n images_tgt = images_tgt.cuda()\r\n # print(images_src.shape, images_tgt.shape)\r\n\r\n # zero gradients for optimizer\r\n optimizer_critic.zero_grad()\r\n\r\n # extract and concat features\r\n feat_src = src_encoder(images_src)\r\n feat_tgt = tgt_encoder(images_tgt)\r\n # print(src_encoder)\r\n # print(feat_tgt.shape)\r\n # print(feat_src.shape)\r\n # print(tgt_encoder)\r\n # print(feat_tgt)\r\n feat_concat = torch.cat((feat_src, feat_tgt), 0)\r\n\r\n # predict on discriminator\r\n pred_concat = critic(feat_concat.detach())\r\n\r\n # prepare real and fake label\r\n label_src = torch.ones(feat_src.size(0)).long()\r\n label_tgt = torch.zeros(feat_tgt.size(0)).long()\r\n label_concat = torch.cat((label_src, label_tgt), 0).cuda()\r\n\r\n # compute loss for critic\r\n loss_critic = criterion(pred_concat, label_concat)\r\n loss_critic.backward()\r\n\r\n # optimize critic\r\n optimizer_critic.step()\r\n\r\n pred_cls = torch.squeeze(pred_concat.max(1)[1])\r\n acc = (pred_cls == label_concat).float().mean()\r\n\r\n ############################\r\n # 2.2 train target encoder #\r\n ############################\r\n\r\n # zero gradients for optimizer\r\n optimizer_critic.zero_grad()\r\n optimizer_tgt.zero_grad()\r\n\r\n # extract and target features\r\n # feat_tgt = tgt_encoder(images_tgt)\r\n feat_tgt = tgt_encoder(images_tgt)\r\n\r\n # predict on discriminator\r\n pred_tgt = critic(feat_tgt)\r\n\r\n # prepare fake labels\r\n label_tgt = torch.ones(feat_tgt.size(0)).long().cuda()\r\n\r\n # compute loss for target encoder\r\n loss_tgt = criterion(pred_tgt, label_tgt)\r\n loss_tgt.backward()\r\n\r\n # optimize target encoder\r\n optimizer_tgt.step()\r\n\r\n #######################\r\n # 2.3 print step info #\r\n #######################\r\n if ((step + 1) % params.log_step == 0):\r\n print(\"Epoch [{}/{}]:\"\r\n \"d_loss={:.5f} g_loss={:.5f} acc={:.5f}\"\r\n .format(epoch + 1,\r\n params.num_epochs,\r\n loss_critic.item(),\r\n loss_tgt.item(),\r\n acc.item()))\r\n test_model(tgt_encoder, classifier, tgt_test_data_loader)\r\n #############################\r\n # 2.4 save model parameters #\r\n #############################\r\n \"\"\"\r\n if ((epoch + 1) % params.save_step == 0):\r\n torch.save(critic.state_dict(), os.path.join(\r\n params.model_root,\r\n \"ADDA-critic-{}.pt\".format(epoch + 1)))\r\n torch.save(tgt_encoder.state_dict(), os.path.join(\r\n params.model_root,\r\n \"ADDA-target-encoder-{}.pt\".format(epoch + 1)))\r\n \"\"\"\r\n tgt_encoder_path = getTargetEncoderPath(src_encoder, src_data_loader, tgt_data_loader)\r\n tgt_critic_path = getTargetDiscriminatorPath(src_encoder, src_data_loader, tgt_data_loader)\r\n torch.save(critic.state_dict(), tgt_critic_path)\r\n torch.save(tgt_encoder.state_dict(), tgt_encoder_path)\r\n return tgt_encoder", "def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = 
pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV", "def build_examples():\n build_models([\n \"VGG_16\",\n \"VGG_19\",\n \"RESNET_50\",\n \"MOBILENET\",\n #\"INCEPTION_V3\",\n #\"INCEPTION_RESNET\",\n #\"DENSENET_121\",\n #\"DENSENET_169\",\n #\"DENSENET_201\"])\n ])", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def __init__(self, input_image_topic=\"/usb_cam/image_raw\", output_image_topic=\"/opendr/image_boxes_annotated\",\n detections_topic=\"/opendr/objects\", device=\"cuda\", backbone=\"darknet53\"):\n\n # Initialize the face detector\n self.object_detector = YOLOv3DetectorLearner(backbone=backbone, device=device)\n self.object_detector.download(path=\".\", verbose=True)\n 
self.object_detector.load(\"yolo_default\")\n self.class_names = self.object_detector.classes\n\n # Initialize OpenDR ROSBridge object\n self.bridge = ROSBridge()\n\n # setup communications\n if output_image_topic is not None:\n self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)\n else:\n self.image_publisher = None\n\n if detections_topic is not None:\n self.bbox_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=10)\n else:\n self.bbox_publisher = None\n\n rospy.Subscriber(input_image_topic, ROS_Image, self.callback)", "def demo2(image_paths, output_dir, cuda):\n\n device = get_device(cuda)\n\n # Synset words\n classes = get_classtable()\n\n # Model\n model = models.resnet152(pretrained=True)\n model.to(device)\n model.eval()\n\n # The four residual layers\n target_layers = [\"relu\", \"layer1\", \"layer2\", \"layer3\", \"layer4\"]\n target_class = 243 # \"bull mastif\"\n\n # Images\n images, raw_images = load_images(image_paths)\n images = torch.stack(images).to(device)\n\n gcam = GradCAM(model=model)\n probs, ids = gcam.forward(images)\n # ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)\n ids_ = torch.tensor([[target_class]] * len(images), dtype=torch.long).to(device)\n gcam.backward(ids=ids_)\n\n for target_layer in target_layers:\n print(\"Generating Grad-CAM @{}\".format(target_layer))\n\n # Grad-CAM\n regions = gcam.generate(target_layer=target_layer)\n\n for j in range(len(images)):\n print(\n \"\\t#{}: {} ({:.5f})\".format(\n j, classes[target_class], float(probs[ids == target_class])\n )\n )\n\n # save_gradcam(\n # filename=osp.join(\n # output_dir,\n # \"{}-{}-gradcam-{}-{}.png\".format(\n # j, \"resnet152\", target_layer, classes[target_class]\n # ),\n # ),\n # gcam=regions[j, 0],\n # raw_image=raw_images[j],\n # )", "def train():\n\n # Load camera parameters\n rcams = cameras.load_cameras()\n\n # Load 3d data and 2d projections\n full_train_set_3d, full_test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d =\\\n data_utils.read_3d_data( FLAGS.camera_frame, rcams, FLAGS.origin_bc, FLAGS.augment_data,\n FLAGS.procrustes, FLAGS.lowpass )\n \n # Read stacked hourglass 2D predictions\n full_train_set_2d, full_test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = \\\n data_utils.read_2d_predictions( FLAGS.origin_bc, FLAGS.augment_data )\n \n print(\"\\n[+] done reading and normalizing data\")\n # Getting the number of training and test subjects\n tr_subj = 0\n for v in full_train_set_3d.values():\n tr_subj += v.shape[0]\n te_subj = 0\n for v in full_test_set_3d.values():\n te_subj += v.shape[0]\n print(\"{0} training subjects, {1} test subjects\".format(tr_subj, te_subj))\n print(dim_to_use_2d)\n print(dim_to_use_3d)\n # Un-normalizing data for visualizations\n unNorm_ftrs2d = data_utils.unNormalize_dic(full_train_set_2d, data_mean_2d, data_std_2d, dim_to_use_2d)\n unNorm_ftrs3d = data_utils.unNormalize_dic(full_train_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n unNorm_ftes3d = data_utils.unNormalize_dic(full_test_set_3d, data_mean_3d, data_std_3d, dim_to_use_3d)\n # Visualize the data\n viz.visualize_train_sample(unNorm_ftrs2d, unNorm_ftrs3d, FLAGS.camera_frame)\n viz.visualize_files_oneatatime(unNorm_ftrs3d, unNorm_ftes3d)\n\n # Getting only the dimensions to use (get rid of body coxas, other limb, antennas, abdomen\n train_set_3d, train_set_2d, test_set_3d, test_set_2d = {}, {}, {}, {}\n for k in full_train_set_3d:\n (f, c) = k\n train_set_3d[k] = 
full_train_set_3d[k][:, dim_to_use_3d]\n train_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_train_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n for k in full_test_set_3d:\n (f, c) = k\n test_set_3d[k] = full_test_set_3d[k][:, dim_to_use_3d]\n test_set_2d[(f, data_utils.CAMERA_TO_USE)] =\\\n full_test_set_2d[(f, data_utils.CAMERA_TO_USE)][:, dim_to_use_2d]\n \n print(\"3D data mean:\")\n print(data_mean_3d)\n print(\"3D data std:\")\n print(data_std_3d)\n\n print(\"2D data mean:\")\n print(data_mean_2d)\n print(\"2D data std:\")\n print(data_std_2d)\n \n input(\"Press Enter to continue...\")\n\n # Avoid using the GPU if requested\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"[*] creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, FLAGS.batch_size )\n model.train_writer.add_graph( sess.graph )\n print(\"[+] model created\")\n \n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 100\n losses, errors, joint_errors = [], [], []\n for _ in range( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( train_set_2d, train_set_3d, FLAGS.camera_frame, training=True )\n nbatches = len( encoder_inputs )\n print(\"[*] there are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}...\".format( current_epoch, i+1, nbatches),end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ =\\\n model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batchespixels = pixels / pixels[2,:]\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\"done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n losses.append(loss)\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n # === Testing after this epoch ===\n isTraining = False\n \n n_joints = len(data_utils.DIMENSIONS_TO_USE)\n if FLAGS.origin_bc:\n n_joints -= len(data_utils.ROOT_POSITIONS)\n\n encoder_inputs, decoder_outputs =\\\n model.get_all_batches( test_set_2d, test_set_3d, FLAGS.camera_frame, training=False)\n\n total_err, coordwise_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n 
current_step, encoder_inputs, decoder_outputs, current_epoch )\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f (%.2f, %.2f, %.2f)\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err,\n coordwise_err[0], coordwise_err[1], coordwise_err[2] ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n errors.append(coordwise_err)\n joint_errors.append(joint_err)\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print( \"Saving the model... \", end=\"\" )\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step )\n print( \"done in {0:.2f} ms\".format(1000*(time.time() - start_time)) )\n\n # Reset global time and loss\n step_time, loss = 0, 0\n\n sys.stdout.flush()\n # Save losses for future plots\n def print_list_tofile(l, filename):\n with open(filename, 'wb') as f:\n pickle.dump(l, f)\n print_list_tofile(losses, train_dir+\"/losses.pkl\")\n print_list_tofile(errors, train_dir+\"/errors.pkl\")\n print_list_tofile(joint_errors, train_dir+\"/joint_errors.pkl\")", "def __init__(self, hparams):\n super(ImagenetTransferLearning, self).__init__()\n self.hparams = hparams\n self.feature_extractor = models.mobilenet_v2(pretrained=True)\n self.feature_extractor.eval()\n\n # Establish classifier\n # self.layer_1 = torch.nn.Linear(hparams[\"input_size\"], 128)\n self.layer_1 = torch.nn.Linear(1000, 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, hparams[\"targets\"])", "def model_performance_comparison(self, yvar, prev_group, C_FP, C_FN):\n trn_bl_df = self.reweigh()\n # sample_weights = self.reweigh(bl_df=trn_bl_df)\n\n s_weights = trn_bl_df.instance_weights\n print(s_weights)\n\n trainset = trn_bl_df.convert_to_dataframe()[0]\n testset = trainset\n\n X = trainset.loc[:, trainset.columns != yvar]\n y = trainset[yvar]\n #X_test = testset.loc[:, trainset.columns != yvar]\n #y_test = testset[yvar]\n\n X_test = X\n y_test = y\n\n clf_ww = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y, sample_weight=s_weights)\n clf_wow = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y)\n\n output_probabilities_to_csv(model=clf_ww, x_test=X_test, path='probs_ww_withprvgroup.csv',priv_group_col=trainset[prev_group], actuals=y_test)\n output_probabilities_to_csv(model=clf_wow, x_test=X_test, path='probs_wow_withprvgroup.csv', priv_group_col=trainset[prev_group], actuals=y_test)\n\n print(\"------------------------------------------\")\n print(\"Accuracy of Vanila Logistic Model\")\n print(\"------------------------------------------\")\n print(\"Without Weights : \", round(clf_wow.score(X_test, y_test), 3))\n print(\"With Weights : \", round(clf_ww.score(X_test, y_test), 3))\n\n X_test_age1 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 1.0]\n y_test_age1 = testset[yvar][testset[prev_group] == 1.0]\n X_test_age0 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 0.0]\n y_test_age0 = testset[yvar][testset[prev_group] == 0.0]\n\n wow = round(abs(clf_wow.score(X_test_age0, y_test_age0) - clf_wow.score(X_test_age1, y_test_age1)), 3)\n ww = round(abs(clf_ww.score(X_test_age0, 
y_test_age0) - clf_ww.score(X_test_age1, y_test_age1)), 3)\n\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age0, path='probs_unpriv_ww.csv')\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age1, path='probs_priv_ww.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age0, path='probs_unpriv_wow.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age1, path='probs_priv_wow.csv')\n\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Difference in accuracy between privileged and unprivileged\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights : \", wow)\n print(\"with weights : \", ww)\n\n Ypredclf = clf_ww.predict(X_test)\n Ypredclf2 = clf_wow.predict(X_test)\n withw = confusion_matrix(y_test, Ypredclf)\n without = confusion_matrix(y_test, Ypredclf2)\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Confusion Matrix\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights\")\n print(without)\n print(\"\")\n print(\"\")\n print(\"with weights\")\n print(withw)\n\n a, b, c, d = without.ravel() #(tn, fp, fn, tp)\n a1, b1, c1, d1 = withw.ravel() #(tn, fp, fn, tp)\n\n withweights = b1 * C_FP + c1 * C_FN\n withoutweights = b * C_FP + c * C_FN\n\n print(\"\")\n print(\"\")\n print(\"cost with weights: \", withweights)\n print(\"cost without weights: \", withoutweights)\n print(\"Has cost decreased after reweighing?\", withweights < withoutweights)\n\n print('')\n print('SUMMARY TABLE')\n\n cost = fr.CostingFairness(input_dataframe=self.data,\n label_names=['credit'],\n protected_attribute_names=['Age_previliged'],\n trained_model=clf_ww)\n\n metrics_table = self.generate_pre_train_metrics_table(model_without_weights=clf_wow,\n model_with_weights=clf_ww,\n test_set=testset,\n target=yvar,\n privileged=prev_group,\n false_positive_cost=C_FP,\n false_negative_cost=C_FN)\n priv_diff_table = generate_privileged_diff(metrics_table)\n delta_table = generate_delta_table(metrics_table)\n costs_table = cost.return_cost_fairness_accuracy_optimised()\n\n # pdf = PDF()\n # pdf.add_page()\n # pdf.write_table_to_pdf(metrics_table)\n # pdf.write_table_to_pdf(priv_diff_table)\n # pdf.write_table_to_pdf(delta_table)\n # pdf.output('TEST01.pdf', 'F')\n\n print(\"\")\n print(\"What we see is interesting, after re-weighing the bias of the model has decreased significantly by {}%, \"\n \"with a very slight decrease in accuracy as shown earlier\".format(round((wow - ww) * 100)))\n\n return metrics_table, priv_diff_table, delta_table, costs_table", "def get_classification(self, image):\n\n # Convert image to PIL RGB image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # add a fourth batch dimension to array\n image = np.expand_dims(image, axis=0)\n\n ## Predict images class\n if image.shape==(1, self.img_height, self.img_width, self.img_channels):\n y_pred = self.model.predict(image)\n else:\n rospy.logwarn(\"tl_classifier: Wrong image shape: {},{},{},{}\".format(image.shape[0],image.shape[1],image.shape[2],image.shape[3]))\n return TrafficLight.UNKNOWN\n\n # Filter predictions\n confidence_threshold = 0.7\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n\n # Output predicted classes and scores\n #rospy.loginfo(\"tl_classifier: class conf xmin ymin xmax ymax\")\n \n # Filter classes 
prediction\n tl_pred_classes = y_pred_thresh[0][:,0]\n tl_pred_scores = y_pred_thresh[0][:,1]\n # Find classes that contains tl's\n tl_pred_classes = [cl for cl in tl_pred_classes if 1<=cl<=3]\n\n\n # Test light state (if prediction is not empty)\n if len(tl_pred_classes) > 0:\n if (tl_pred_classes[0]==1):\n tl_return = TrafficLight.GREEN\n rospy.loginfo(\"tl_classifier: Green detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==2):\n tl_return = TrafficLight.YELLOW\n rospy.loginfo(\"tl_classifier: Yellow detected, score {:.2f}\".format(tl_pred_scores[0]))\n elif (tl_pred_classes[0]==3):\n tl_return = TrafficLight.RED\n rospy.loginfo(\"tl_classifier: Red detected, score {:.2f}\".format(tl_pred_scores[0]))\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Other class detected!\")\n else:\n tl_return = TrafficLight.UNKNOWN\n rospy.loginfo(\"tl_classifier: Unknown detected!\")\n\n\n return tl_return", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n 
torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def cvpr2018_net_T1T2(vol_size, enc_nf, dec_nf, full_size=True, indexing='ij'):\n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n # get the core model\n unet_model_channel1 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)\n [srcT1, tgtT1] = unet_model_channel1.inputs\n x_out_T1 = unet_model_channel1.outputs\n unet_model_channel2 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)\n [srcT2, tgtT2] = unet_model_channel2.inputs\n x_out_T2 = unet_model_channel2.outputs\n\n # transform the results into a flow field.\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_T1 = Conv(ndims, kernel_size=3, padding='same', name='flow_T1',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x_out_T1)\n flow_T2 = Conv(ndims, kernel_size=3, padding='same', name='flow_T2',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x_out_T2)\n flow = MergeInputs3D()([flow_T1, flow_T2])\n\n # warp the source with the flow\n y_T1_flowT1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow_T1])\n y_T2_flowT2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow_T2])\n y_T1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow])\n y_T2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow])\n # prepare model\n model = Model(inputs=[srcT1, tgtT1, srcT2, tgtT2], outputs=[y_T1, y_T2, flow, y_T1_flowT1, y_T2_flowT2])\n return model", "def train_net_det(network, imdb, roidb, output_dir, pretrained_model=None, pretrained_ckpt=None, max_iters=40000):\n\n loss_regu = tf.add_n(tf.losses.get_regularization_losses(), 'regu')\n\n # RPN, class loss\n rpn_cls_score = tf.reshape(network.get_output('rpn_cls_score_reshape'), [-1, 2])\n rpn_label = tf.reshape(network.get_output('rpn_labels'), [-1])\n rpn_select = tf.where(tf.not_equal(rpn_label, -1))\n rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])\n rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])\n loss_rpn_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))\n\n # RPN, bbox loss\n rpn_bbox_pred = network.get_output('rpn_bbox_pred')\n rpn_bbox_targets = network.get_output('rpn_bbox_targets')\n rpn_bbox_inside_weights = network.get_output('rpn_bbox_inside_weights')\n rpn_bbox_outside_weights = network.get_output('rpn_bbox_outside_weights')\n loss_rpn_box = smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=3.0, dim=[1, 2, 3])\n\n # RCNN, class loss\n cls_score = network.get_output(\"cls_score\")\n label = tf.reshape(network.get_output(\"labels\"), [-1])\n loss_cls = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cls_score, labels=label))\n\n # RCNN, bbox loss\n bbox_pred = network.get_output('bbox_pred')\n bbox_targets = network.get_output('bbox_targets')\n bbox_inside_weights = network.get_output('bbox_inside_weights')\n bbox_outside_weights = network.get_output('bbox_outside_weights')\n loss_box = smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, 
bbox_outside_weights)\n\n # pose regression loss\n loss_pose = network.get_output('loss_pose')[0]\n\n # add losses\n loss = loss_rpn_cls + loss_rpn_box + loss_cls + loss_box + loss_pose + loss_regu\n\n # optimizer\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = cfg.TRAIN.LEARNING_RATE\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n cfg.TRAIN.STEPSIZE, 0.1, staircase=True)\n momentum = cfg.TRAIN.MOMENTUM\n train_op = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss, global_step=global_step)\n\n # config = tf.ConfigProto()\n # config.gpu_options.per_process_gpu_memory_fraction = 0.85\n # config.gpu_options.allow_growth = True\n # with tf.Session(config=config) as sess:\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n sw = SolverWrapper(sess, network, imdb, roidb, output_dir, pretrained_model=pretrained_model, pretrained_ckpt=pretrained_ckpt)\n\n # thread to load data\n data_layer = GtSynthesizeLayer(roidb, imdb.num_classes, imdb._extents, imdb._points_all, imdb._symmetry, imdb.cache_path, imdb.name, cfg.CAD, cfg.POSE)\n\n print 'Solving...'\n sw.train_model_det(sess, train_op, loss, loss_rpn_cls, loss_rpn_box, loss_cls, loss_box, loss_pose, learning_rate, max_iters, data_layer)\n print 'done solving'", "def mini_imagenet_tasksets(\n train_ways=5,\n train_samples=10,\n test_ways=5,\n test_samples=10,\n root='~/data',\n data_augmentation=None,\n device=None,\n **kwargs,\n):\n if data_augmentation is None:\n train_data_transforms = None\n test_data_transforms = None\n elif data_augmentation == 'normalize':\n train_data_transforms = Compose([\n lambda x: x / 255.0,\n ])\n test_data_transforms = train_data_transforms\n elif data_augmentation == 'lee2019':\n normalize = Normalize(\n mean=[120.39586422/255.0, 115.59361427/255.0, 104.54012653/255.0],\n std=[70.68188272/255.0, 68.27635443/255.0, 72.54505529/255.0],\n )\n train_data_transforms = Compose([\n ToPILImage(),\n RandomCrop(84, padding=8),\n ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ])\n test_data_transforms = Compose([\n normalize,\n ])\n else:\n raise ValueError('Invalid data_augmentation argument.')\n\n train_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='train',\n download=True,\n )\n valid_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='validation',\n download=True,\n )\n test_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='test',\n download=True,\n )\n if device is None:\n train_dataset.transform = train_data_transforms\n valid_dataset.transform = test_data_transforms\n test_dataset.transform = test_data_transforms\n else:\n train_dataset = l2l.data.OnDeviceDataset(\n dataset=train_dataset,\n transform=train_data_transforms,\n device=device,\n )\n valid_dataset = l2l.data.OnDeviceDataset(\n dataset=valid_dataset,\n transform=test_data_transforms,\n device=device,\n )\n test_dataset = l2l.data.OnDeviceDataset(\n dataset=test_dataset,\n transform=test_data_transforms,\n device=device,\n )\n train_dataset = l2l.data.MetaDataset(train_dataset)\n valid_dataset = l2l.data.MetaDataset(valid_dataset)\n test_dataset = l2l.data.MetaDataset(test_dataset)\n\n train_transforms = [\n NWays(train_dataset, train_ways),\n KShots(train_dataset, train_samples),\n LoadData(train_dataset),\n RemapLabels(train_dataset),\n ConsecutiveLabels(train_dataset),\n ]\n valid_transforms = [\n NWays(valid_dataset, test_ways),\n 
KShots(valid_dataset, test_samples),\n LoadData(valid_dataset),\n ConsecutiveLabels(valid_dataset),\n RemapLabels(valid_dataset),\n ]\n test_transforms = [\n NWays(test_dataset, test_ways),\n KShots(test_dataset, test_samples),\n LoadData(test_dataset),\n RemapLabels(test_dataset),\n ConsecutiveLabels(test_dataset),\n ]\n\n _datasets = (train_dataset, valid_dataset, test_dataset)\n _transforms = (train_transforms, valid_transforms, test_transforms)\n return _datasets, _transforms", "def network_modified(input):\n\n up6 = upsample_and_concat( conv5, conv4, 256, 512 , 'up_conv1' )\n conv6=slim.conv2d(up6, 256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_1')\n conv6=slim.conv2d(conv6,256,[3,3], rate=1, activation_fn=lrelu,scope='g_conv6_2')\n\n up7 = upsample_and_concat( conv6, conv3, 128, 256 , 'up_conv2' )\n conv7=slim.conv2d(up7, 128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_1')\n conv7=slim.conv2d(conv7,128,[3,3], rate=1, activation_fn=lrelu,scope='g_conv7_2')\n\n up8 = upsample_and_concat( conv7, conv2, 64, 128 , 'up_conv3')\n conv8=slim.conv2d(up8, 64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_1')\n conv8=slim.conv2d(conv8,64,[3,3], rate=1, activation_fn=lrelu,scope='g_conv8_2')\n\n up9 = upsample_and_concat( conv8, conv1, 32, 64 , 'up_conv4')\n conv9=slim.conv2d(up9, 32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_1')\n conv9=slim.conv2d(conv9,32,[3,3], rate=1, activation_fn=lrelu,scope='g_conv9_2')\n\n conv10=slim.conv2d(conv9,12,[1,1], rate=1, activation_fn=None, scope='g_conv10')\n out = tf.depth_to_space(conv10,2)\n return out", "def __init__(self, hparams):\n super(ThreeLayerClassifier, self).__init__()\n self.hparams = hparams\n self.layer_1 = torch.nn.Linear(self.hparams[\"input_size\"], 128)\n self.layer_2 = torch.nn.Linear(128, 256)\n self.layer_3 = torch.nn.Linear(256, self.hparams[\"targets\"])", "def __init__(self, num_gpus):\n\n super(Critic, self).__init__()\n n_in = IMG_CHANNELS\n n_out = 1\n\n feature_map = IMG_SIZE\n kernel_size = 4\n stride = 2\n padding = 1\n bias = False\n\n self.num_gpus = num_gpus\n\n self.network = nn.Sequential(\n # nodes = IMG_CHANNELS * IMG_SIZE * IMG_SIZE\n nn.Conv2d(n_in, feature_map, kernel_size, stride, padding, bias=bias),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 2\n nn.Conv2d(feature_map, feature_map * 2, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 4\n nn.Conv2d(feature_map * 2, feature_map * 4, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 4, feature_map * 8, kernel_size, stride, padding, bias=bias),\n nn.BatchNorm2d(feature_map * 8),\n nn.LeakyReLU(0.2, inplace=True),\n\n # nodes = feature_map * 8\n nn.Conv2d(feature_map * 8, n_out, kernel_size, 1, 0, bias=bias),\n # scratched sigmoid activation function\n )", "def mgcNetArchMax(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = 
layers.Conv2D(filters=32, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv1)\n conv2 = layers.Activation('relu')(conv2) \n conv2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=128, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv2)\n conv3 = layers.Activation('relu')(conv3) \n conv3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n\n # Layer 4\n #------------------------\n conv4 = layers.Conv2D(filters=256, kernel_size=(2,2), padding='same', dilation_rate = (2, 2), kernel_regularizer=regularizers.l2(l2_val))(conv3)\n conv4 = layers.Activation('relu')(conv4)\n conv4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)\n conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=128, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output) \n output = layers.MaxPooling2D(pool_size=(2, 2))(output)\n output = layers.Dropout(0.4)(output)\n\n\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(64, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'cnn_max')\n\n return model", "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model" ]
[ "0.6369418", "0.6306215", "0.6232091", "0.6182974", "0.60732025", "0.5921903", "0.5916691", "0.5855269", "0.5853199", "0.5839653", "0.57973075", "0.5778595", "0.574271", "0.573733", "0.57194585", "0.5616862", "0.55473757", "0.5541811", "0.554155", "0.5500795", "0.5457074", "0.5437526", "0.5434107", "0.54299426", "0.5425723", "0.5425672", "0.5417012", "0.53627396", "0.5351046", "0.5348419", "0.53476954", "0.5335457", "0.53158087", "0.5315778", "0.5299621", "0.5280798", "0.5280678", "0.5279434", "0.5273268", "0.5271608", "0.5261175", "0.5253323", "0.5250586", "0.52416426", "0.52242225", "0.52160966", "0.5208552", "0.5201719", "0.5201088", "0.519864", "0.51888233", "0.5177147", "0.51762486", "0.51705277", "0.51692265", "0.5164894", "0.5160123", "0.5158354", "0.5153969", "0.514698", "0.5135471", "0.5131554", "0.5128881", "0.512335", "0.51176226", "0.51161885", "0.5114846", "0.5113122", "0.510635", "0.508885", "0.5088843", "0.5082199", "0.5082036", "0.50723165", "0.5067348", "0.50652903", "0.5062541", "0.50622183", "0.5060365", "0.5056795", "0.5055298", "0.5054253", "0.50501424", "0.5046568", "0.50431955", "0.5040148", "0.50340027", "0.50330925", "0.5021385", "0.50198543", "0.5019795", "0.5018463", "0.50177735", "0.50127494", "0.5006821", "0.50049996", "0.50048435", "0.5002857", "0.5000138", "0.49981347" ]
0.6497924
0
Update information about a host
def update(self, compute_node=None, service=None):
    @utils.synchronized((self.hostname, compute_node))
    def _locked_update(self, compute_node, service):
        if compute_node is not None:
            LOG.debug('Update host state from compute node: %s',
                      compute_node)
            self._update_from_compute_node(compute_node)
        if service is not None:
            LOG.debug('Update host state with service: %s', service)
            self.service = service

    return _locked_update(self, compute_node, service)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self, host):\n pass", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def update(self, host, values):\n body = dict(host=values)\n return self._update(\"/os-hosts/%s\" % host, body, response_key='host')", "def update(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n return host.update(**kwargs)", "def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)", "def update(call: ServiceCall) -> None:\n called_host = call.data[ATTR_HOST]\n if called_host in hass.data[DOMAIN]:\n hass.data[DOMAIN][called_host].update()\n else:\n for iperf3_host in hass.data[DOMAIN].values():\n iperf3_host.update()", "def update(self, host_id, values):\n if not values:\n return _('No values to update passed.')\n return self._update('/os-hosts/%s' % host_id, values,\n response_key='host')", "def updateHost(self, *hosts):\n localhost_name = None\n old_hostnames = []\n for old_host in self.hosts.values():\n old_hostnames.append(old_host.name)\n if isinstance(old_host, LocalHost):\n if localhost_name is not None:\n logger.warning('Duplicate localhost found in lab.hosts')\n localhost_name = old_host.name\n for new_host in hosts:\n # Updating localhost\n if (isinstance(new_host, LocalHost) and localhost_name is not None):\n # Check for localhost clash\n if new_host.name != localhost_name:\n logger.warning('Localhost is already present: ' +\n '%s\\n' +\n 'Not updating host %s!', localhost_name, new_host.name)\n continue\n else:\n localhost_name = new_host.name\n # Will an update happen?\n if new_host.name in old_hostnames:\n logger.info('Overwriting host: %s', new_host.name)\n # Will it end up removing the localhost?\n if (new_host.name == localhost_name and\n not isinstance(new_host, LocalHost)):\n localhost_name = None\n self.hosts[new_host.name] = new_host\n if localhost_name is None:\n logger.warning('Localhost not yet present')", "def update_host_config(self, hostid, config, **kwargs):\n pass", "def host_update(self, host, ip_list, raw=True):\n\n endpoint = '/Domain/Host/Update'\n\n params = {\n 'Host' : host,\n 'IP_List' : \",\".join(ip_list)\n }\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'SUCCESS'", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e", "def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes", "def sethost(self, host):\n self.__host = host", "def backend_info_update(context, host, value=None, delete_existing=False):\n info_ref = _backend_info_query(context, host)\n if info_ref:\n if value:\n info_ref.update({\"info_hash\": value})\n elif delete_existing and info_ref['deleted'] != 1:\n info_ref.update({\"deleted\": 1, \"deleted_at\": timeutils.utcnow()})\n else:\n info_ref = models.BackendInfo()\n info_ref.update({\"host\": host, \"info_hash\": value})\n info_ref.save(context.session)\n return 
info_ref", "def update_dht(self, d_ip, d_port):\n self.dht_ip = d_ip\n self.dht_port = d_port", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def _vm_update_host(zx, vm, host, log=None):\n log = log or zx.log\n hostid = host['hostid']\n log(DEBUG, 'VM %s already defined in Zabbix as host ID \"%s\"', vm, hostid)\n params = zx.diff_vm_host(vm, host, log=log) # Issue #chili-311\n\n if params:\n log(WARNING, 'Zabbix host ID \"%s\" configuration differs from current VM %s configuration', hostid, vm)\n log(INFO, 'Updating Zabbix host ID \"%s\" according to VM %s with following parameters: %s',\n hostid, vm, params)\n\n if zx.update_host(hostid, log=log, **params):\n log(INFO, 'Updated Zabbix host ID \"%s\"', hostid)\n zx.save_host_info(vm, log=log)\n else:\n log(ERROR, 'Could not update Zabbix host ID \"%s\"', hostid)\n return False\n\n else: # Host in sync with VM\n log(INFO, 'Zabbix host ID \"%s\" configuration is synchronized with current VM %s configuration', hostid, vm)\n return True\n\n return True", "def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):\n return self.host_profile.update(body, uri, api, headers)", "def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))", "def update_host_ovs(self, context):\n LOG.info(_('Updating Open vSwitch host data...'))\n LOG.debug(\"Current DOM: %s\" % self.current_dom.to_dict())\n LOG.debug(\"Requested DOM: %s\" % self.desired_dom.to_dict())\n\n builder = mob.MicroOperationBuilder(context,\n self.current_dom,\n self.desired_dom,\n self.rollback)\n\n mo_list = builder.get_micro_ops_for_update()\n\n # run validation\n return self._run_micro_op_list(mo_list)", "def rename(self, old_host, new_host):\n if new_host in self.hosts_:\n raise ValueError(\"Host %s: already exists.\" % new_host)\n for line in self.lines_: # update lines\n if line.host == old_host:\n line.host = new_host\n if line.key.lower() == \"host\":\n line.value = new_host\n line.line = \"Host %s\" % new_host\n self.hosts_.remove(old_host) # update host cache\n self.hosts_.add(new_host)", "async def update_info_data(_: datetime | None = None) -> None:\n\n try:\n (\n hass.data[DATA_INFO],\n hass.data[DATA_HOST_INFO],\n hass.data[DATA_STORE],\n hass.data[DATA_CORE_INFO],\n hass.data[DATA_SUPERVISOR_INFO],\n hass.data[DATA_OS_INFO],\n ) = await asyncio.gather(\n hassio.get_info(),\n hassio.get_host_info(),\n hassio.get_store(),\n hassio.get_core_info(),\n hassio.get_supervisor_info(),\n hassio.get_os_info(),\n )\n\n except HassioAPIError as err:\n _LOGGER.warning(\"Can't read Supervisor data: %s\", err)\n\n async_call_later(\n hass,\n HASSIO_UPDATE_INTERVAL,\n HassJob(update_info_data, cancel_on_shutdown=True),\n )", "def set(self, host, **kwargs):\n for p, c in self.configs_:\n if host in c.hosts_:\n c.set(host, **kwargs)\n return\n raise ValueError(\"Host %s: not found\" % host)", "def host(self, host: str):\n\n self._host = host", "def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"", "def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True", "def add_host(self, 
name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts", "def update_description(self, host, baseUrl, description):\n self._host = host\n self._urlBase = baseUrl\n self._description = description\n return", "def set_hostname(self, path, hostname):\n\n f = open(os.path.join(path, 'etc', 'hostname'), 'w')\n f.write(hostname + \"\\n\")\n f.close()\n\n hosts = os.path.join(path, 'etc', 'hosts')\n\n with open(hosts, 'rb') as f:\n reader = csv.reader(f, delimiter=\"\\t\")\n rows = [row for row in reader]\n\n for row in rows:\n if len(row) > 1 and row[0] == '127.0.1.1':\n row[1] = hostname\n break\n\n with open(hosts, 'w') as f:\n for row in rows:\n f.write(\"\\t\".join(row) + \"\\n\")", "def Host(self, h):\r\n\r\n self.host = h\r\n return self", "def update_field(self, field, value, harvesterid, harvesterhost):\n connection = self.connection\n query = \"\"\"UPDATE INSTANCES SET {0} = ? WHERE harvesterid = ? and harvesterhost = ?\"\"\".format(field)\n cur = connection.cursor()\n\n cur.execute(query, (value,\n harvesterid, harvesterhost))\n connection.commit()", "def host(self, host):\n if host is None:\n raise ValueError(\"Invalid value for `host`, must not be `None`\")\n\n self._host = host", "def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))", "def update(self, name=None, password=None, host=None):\n return self.manager.update(self, name=name, password=password,\n host=host)", "def update_see(self):\n _LOGGER.debug(\"Updating device tracker: %s\", self._name)\n self._see(\n dev_id=self.dev_id,\n host_name=self.name,\n battery=self.battery,\n gps=(self.lat, self.lon),\n attributes={\n 'status': self.status,\n 'id': self.dev_id,\n 'name': self.name,\n CONF_ICON: self.icon,\n 'vendor': VENDOR,\n 'model': self.model})", "def host_name(self, host_name):\n\n self._host_name = host_name", "def host_name(self, host_name):\n\n self._host_name = host_name", "def host_num(self, host_num):\n\n self._host_num = host_num", "def update():", "def update():", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def host(self, host: str, fields: str = None) -> dict:\n endpoint = f\"/api/host/{host}\" if host else \"/api/host/\"\n ret = self._request(\n endpoint=endpoint,\n params={\"fields\": fields} if fields else {},\n )\n return ret", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def get_host_stats(self, refresh=False):", "def update_host_heartbeat(self, hostname: str) -> bool:\n with self.lock:\n try:\n host = Query()\n self.hosts.update({'latest_recv': datetime.now().strftime(self.time_format)},\n host.hostname.matches(hostname))\n return True\n except Exception as err:\n raise UpdateError('Cannot update latest_recv of host with hostname={}'.format(hostname), err)", "def connect_with_host_data(self, host: Host):\n 
host_obj = self.content.load_host(host.instanceId)\n\n if host_obj.connectionString:\n print_light_grey('Found host data, trying to connect...')\n\n # Has a bounce host.\n if host_obj.connectionString.bounce_host:\n bounce_host = DiscoverHost(self.account_obj, bounce=True).get_bounce()\n\n if not DoConnectAndSave(host_obj, self.account_obj).bounce_regular_connect(bounce_host):\n sys.exit(0)\n else:\n if not DoConnectAndSave(host_obj, self.account_obj).regular_connect():\n sys.exit(0)\n\n print_orange('Found host data is obsolete, trying to find a new path...')\n\n raise HostNotFound", "def ip_update(self, custom_domain, heroku_host):\n update_pattern = None\n resultmsg = \"TargetHost:%s Result:\" % custom_domain\n new_dns_a_record = None\n\n dns_a_record = self.get_dns_A_record(custom_domain)\n heroku_host_ip = self.get_heroku_host_ip(heroku_host)\n\n #Store A record to Dozens Server\n if dns_a_record is None:\n update_pattern = \"Create\"\n new_dns_a_record = self.create_A_record(heroku_host_ip,\n custom_domain, Config.DEFAULT_TTL)\n elif dns_a_record[\"content\"] != heroku_host_ip:\n update_pattern = \"Update\"\n new_dns_a_record = self.update_A_record(heroku_host_ip,\n dns_a_record)\n elif dns_a_record[\"content\"] == heroku_host_ip:\n update_pattern = \"Already updated\"\n new_dns_a_record = dns_a_record\n\n #Evaluate and cache the result\n if new_dns_a_record is not None:\n resultmsg += \"Success.%s%s\" % (update_pattern, new_dns_a_record)\n resultflg = True\n if update_pattern != \"Alread updated\":\n self._set_cache(custom_domain, new_dns_a_record)\n else:\n resultmsg += \"Fail. %s.\" % update_pattern\n resultflg = False\n\n return (resultflg, resultmsg)", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n 
all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def getHostInfo():", "def usage(self, host):", "def update( ):\r\n pass", "def _update_hosts_file(self, resolution):\n self._execute_command('echo {0} >> /etc/hosts'.format(resolution),\n sudo=True)", "def handle_ping(self, host):\n self.send(\"PONG :{}\".format(host))", "async def async_step_host(self, info: Optional[dict] = None):\n errors = {}\n if info is not None:\n try:\n data = await self._async_get_entry_data(\n self._device_info.serial,\n self._device_info.credential,\n self._device_info.product_type,\n self._device_info.name,\n info.get(CONF_HOST),\n )\n except CannotConnect:\n errors[\"base\"] = \"cannot_connect\"\n except CannotFind:\n errors[\"base\"] = \"cannot_find\"\n else:\n return self.async_create_entry(\n title=self._device_info.name,\n data=data,\n )\n\n info = info or {}\n return self.async_show_form(\n step_id=\"host\",\n data_schema=vol.Schema(\n {vol.Optional(CONF_HOST, default=info.get(CONF_HOST, \"\")): str}\n ),\n errors=errors,\n )", "def hostname(self, hostname):\n\n self._hostname = hostname", "def hostname(self, hostname):\n\n self._hostname = hostname", "def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def host_ip(self, host_ip):\n\n self._host_ip = host_ip", "def poll_host(self, server, obj, name):\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats", "def host_info(self, host):\n\n endpoint = '/Domain/Host/Info'\n\n params = {\n 'Host' : host,\n }\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def update_task_info(self, url, path):\n try:\n db_connect = pymysql.connect(**self._taskdb_config)\n with db_connect.cursor() as cursor:\n task_id = self.get_task_id(url)\n cursor.execute(\"UPDATE mv SET localpath = '%s' WHERE taskid = '%s'\" % (path, task_id))\n db_connect.commit()\n except:\n db_connect.rollback()\n print(\"Fail updating task information, url=%s, path=%s\" % (url, path))\n else:\n print(\"Success updating task information, url=%s, path=%s\" % (url, path))\n finally:\n db_connect.close()", "def update(self, update):\n\n params = shlex.split(update)\n if params[0] in self.addr:\n self.addr[params[0]].update(*params)\n\n else:\n a = Addr(self)\n # add both name and IP address\n self.addr[params[0]] = a\n self.addr[params[1]] = a\n a.update(*params)\n self.notify(\"addrmap_added\", *[a], **{})", "def update_host_configs(self, host_configs, **kwargs):\n for hostid, config in host_configs.items():\n self.update_host_config(hostid, config, **kwargs)", "def update(self):\n # TO DO for updating urls if changed\n pass", "def update_host_metrics(self, hostname, metrics: List[Dict]) -> bool:\n with self.lock:\n try:\n host = Query()\n self.hosts.upsert({'metrics': metrics},\n host.hostname.matches(hostname))\n\n return True\n except Exception as err:\n raise UpdateError('Cannot update metrics: {} of host with hostname={}'.format(metrics, hostname), err)", "def modify(request, host_id):\n POST = HostForm.filter_querydict(request.user, request.POST)\n host = get_host_or_404(request.user, pk=host_id)\n prefix = str(host_id)\n if request.is_ajax():\n template = \"clariadmin/ajax_host.html\"\n else:\n template = \"clariadmin/host.html\"\n \n remote_addr = get_request_remote_addr(request)\n \n add_fields = 
AdditionnalFieldForm(host=host, prefix=prefix)\n if POST:\n form = HostForm(request.user, remote_addr,\n POST, instance=host, prefix=prefix)\n if POST.get(\"delete\", False):\n form.delete()\n return redirect('list_hosts')\n if form.is_valid():\n host, add_fields = form.save(POST=POST, prefix=prefix)\n redir = POST.get('submit_button', False)\n if redir == 'new':\n return redirect('new_host')\n elif redir == 'save':\n pass\n elif redir == 'return':\n return redirect('list_hosts')\n else:\n form = HostForm(request.user, remote_addr,\n instance=host, prefix=prefix)\n form.log_action(u\"consulté\")\n return render_to_response(template, {\n \"form\": form,\n 'additionnal_fields': add_fields,\n 'prefix': prefix,\n 'ajax': request.is_ajax(),\n \"host\": host}, context_instance=RequestContext(request))", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def update_H(self, curl_E):", "def share_resources_host_update(context, current_host, new_host):\n\n resources = {\n 'instances': models.ShareInstance,\n 'servers': models.ShareServer,\n 'groups': models.ShareGroup,\n }\n result = {}\n\n for res_name, res_model in resources.items():\n host_field = res_model.host\n query = model_query(\n context, res_model, read_deleted=\"no\",\n ).filter(host_field.like('{}%'.format(current_host)))\n count = query.update(\n {host_field: func.replace(host_field, current_host, new_host)},\n synchronize_session=False,\n )\n result.update({res_name: count})\n return result", "def reinstall_host(self, hostid, config, **kwargs):\n pass", "def add_label_to_existing(hostname, new_labels):\n\n logging.debug('going to add labels %s to existing host %s' % (new_labels, hostname))\n\n checkmk_api_url = config['checkmk_api_url']\n checkmk_api_username = config['checkmk_api_username']\n checkmk_api_secret = config['checkmk_api_secret']\n checkmk_puppetdb_label = config['checkmk_puppetdb_label']\n\n # Save the attributes, save the ~world~ existing labels\n req_params = { 'action': 'get_host',\n '_username': config['checkmk_api_username'],\n '_secret': config['checkmk_api_secret'],\n 'hostname': hostname,\n 'output_format': 'json' }\n r = requests.post(checkmk_api_url, req_params)\n\n existing_labels = {}\n try:\n existing_labels.update(r.json()['result']['attributes']['labels'])\n except:\n pass\n\n # add new labels to existing labels and ensure from_puppetdb label present\n existing_labels.update(new_labels)\n existing_labels.update({ 'from_puppetdb': checkmk_puppetdb_label })\n\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'attributes': {\n 'labels': existing_labels\n }\n })}\n\n logging.debug('-- adding labels %s to host %s' % (existing_labels, hostname))\n r = requests.post(\"%s?action=edit_host&_username=%s&_secret=%s\" % (checkmk_api_url, checkmk_api_username, checkmk_api_secret), data=payload)\n logging.debug('-- got resp code = %d' % r.status_code)\n logging.debug('-- got resp text = %s' % r.text)\n r_json = json.loads(r.text)\n\n # Successful edit_host gives response of {\"result\": null, \"result_code\": 0}\n if r_json['result_code'] == 0 and r_json['result'] is None:\n logging.info('added labels %s to %s successfully' % (existing_labels, hostname))\n else:\n logging.warn('failed to add labels %s to host %s' % (r_json['result'], hostname))", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n 
self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def set_service_host(self, host):\n self._api_host = f\"https://{host}\"", "def test_update_hyperflex_cluster(self):\n pass", "def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)", "def opencloud_fetch_host_info( hostname ):\n raise Exception(\"Opencloud support not implemented\")", "def insert_host(self, info: 'vrmjobs.HostInfo') -> bool:\n with self.lock:\n try:\n host = self._check_host_existence(info.hostname)\n\n if not host:\n ports = []\n for p in info.ports:\n ports.append({\"daemon\": p.daemon, \"port\": p.port})\n\n self.hosts.insert({\"hostname\": info.hostname,\n \"inet_addr\": info.inet_addr,\n \"ports\": ports,\n \"type\": info.type.name,\n \"latest_recv\": datetime.now().strftime(self.time_format)})\n return True\n return False\n except Exception as err:\n raise InsertError('Cannot insert new host {}'.format(str(info)), err)", "def host(self, host: str):\n if host is None:\n raise ValueError(\"Invalid value for `host`, must not be `None`\") # noqa: E501\n\n self._host = host", "def command_update_hw(self, cmd):\n # TODO\n pass", "def set_hostname(self, new_hostname):\n return self.mycam.devicemgmt.SetHostname(new_hostname)", "def __add_host(self, host_form):\n try:\n host_object = Host.objects.get(\n host_name=host_form.cleaned_data['host_name']\n )\n for field in host_form.cleaned_data:\n setattr(\n host_object, field, host_form.cleaned_data[field]\n )\n host_object.save()\n return HttpResponseRedirect(reverse('log_collector:index'))\n except errors.ObjectDoesNotExist:\n return self.form_valid(host_form)", "def set_host_addr(self, addr: str) -> None:\n self.config[\"host_addr\"] = addr", "def collect_compute_info(self, ctxt, host_id, host_info):\n logger.info(\"Info of \" + host_id + \" :\" + 
str(host_info))\n self._compute_node_info.add_node_info(host_id, host_info)", "def _status(self, host):\n pass", "def _host_in_event(self, ev):\n self._update_nodes()\n\n if not self.nodes:\n return\n\n for node in self.nodes:\n if node.ip in ev.host.ipv4:\n datapath = self.dpset.get(ev.host.port.dpid)\n node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)\n self._install_cdnengine_matching_flow(datapath, node.ip, node.port)\n self.logger.info('New Node connected the network. Matching rules were installed ' + node.__str__())", "def add(self, host, **kwargs):\n if host in self.hosts_:\n raise ValueError(\"Host %s: exists (use update).\" % host)\n self.hosts_.add(host)\n self.lines_.append(ConfigLine(line=\"\", host=None))\n self.lines_.append(ConfigLine(line=\"Host %s\" % host, host=host, key=\"Host\", value=host))\n for k, v in kwargs.items():\n if type(v) not in [list, tuple]:\n v = [v]\n mapped_k = _remap_key(k)\n for value in v:\n new_line = self._new_line(mapped_k, value)\n self.lines_.append(ConfigLine(line=new_line, host=host, key=mapped_k, value=value))\n self.lines_.append(ConfigLine(line=\"\", host=None))", "async def async_update_device_info(self) -> None:\n data = await self._async_request(\"get\", \"device\")\n self._device_info = cast(Dict[str, Any], data)", "def set_hostname(dut, host_name):\n cmd = \"sudo hostname {}\".format(host_name)\n st.config(dut, cmd)\n return", "async def force_info_update_supervisor(self) -> None:\n self.hass.data[DATA_SUPERVISOR_INFO] = await self.hassio.get_supervisor_info()\n await self.async_refresh()", "def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]", "def rename(self, old_host, new_host):\n if new_host in self.hosts():\n raise ValueError(\"Host %s: already exists.\" % new_host)\n for p, c in self.configs_:\n if old_host in c.hosts_:\n c.rename(old_host, new_host)", "def softupdate_ip(request, ipaddress):\n\n softupdate_key = settings.SOFTUPDATE_KEY\n if request.POST.get(\"key\", \"invalid_key\") != softupdate_key:\n raise PermissionDenied()\n\n # LC: UGGLY and not \"portable\"\n STATUS_EN_SERVICE = 'En service'\n\n def noanswer(reason=\"\"):\n message = \"\"\"Modification impossible.\\n\"\"\"\n if reason and settings.DEBUG:\n message += \"\"\"%s\\n\"\"\" % (reason,)\n return HttpResponse(message, content_type=\"plain/text\")\n\n serial = request.POST.get(\"serial\", None)\n hostname = request.POST.get(\"hostname\", None)\n\n host = None\n errmsgs = []\n\n if serial:\n hosts = Host.objects.filter(serial=serial)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n if not host:\n errmsgs.append(\"Le host serial=%s est introuvable.\" % (serial,))\n\n if hostname and not host:\n hosts = Host.objects.filter(hostname=hostname,\n status__description=STATUS_EN_SERVICE)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n # Get the last log entry\n hostlogs = HostIPLog.objects.filter(host=host, log_ip=ipaddress) \\\n .order_by(\"-date\")\n if hostlogs:\n hostlog = hostlogs[0]\n else:\n hostlog = HostIPLog(host=host, log_ip=ipaddress)\n \n hostlog.log_queryfrom = get_request_remote_addr(request)\n hostlog.log_hostname = request.POST.get('hostname', 'unknown')\n hostlog.save()\n\n return HttpResponse('ok.', content_type='plain/text')" ]
[ "0.837656", "0.7654533", "0.71974707", "0.7100948", "0.69615793", "0.68412817", "0.67987186", "0.674337", "0.6699968", "0.6577405", "0.6523611", "0.6492715", "0.6492715", "0.63358724", "0.63345975", "0.62820405", "0.62123007", "0.62056744", "0.617595", "0.61610746", "0.61610746", "0.61610746", "0.61610746", "0.6108285", "0.6092122", "0.60271275", "0.59695065", "0.5967536", "0.59389544", "0.59380996", "0.5936333", "0.591039", "0.5880107", "0.586313", "0.5860245", "0.5844537", "0.582792", "0.5826754", "0.58136475", "0.5759978", "0.5752617", "0.573316", "0.5728305", "0.5720551", "0.5720551", "0.5707607", "0.57046366", "0.57046366", "0.5701365", "0.5692245", "0.5686141", "0.5667313", "0.5652508", "0.5651806", "0.56355363", "0.5629784", "0.561906", "0.56151736", "0.5613294", "0.5601301", "0.5598749", "0.5589822", "0.55757296", "0.55757296", "0.55716026", "0.5569411", "0.5564339", "0.55532163", "0.55520135", "0.5545522", "0.55276895", "0.5511757", "0.5509937", "0.55027246", "0.5489509", "0.54836035", "0.54830396", "0.5475939", "0.5473485", "0.5443448", "0.5433783", "0.54240644", "0.5419314", "0.5410301", "0.54074585", "0.5402802", "0.5377604", "0.537465", "0.5372494", "0.5369631", "0.53492236", "0.53461075", "0.53437823", "0.5339455", "0.5331059", "0.53247374", "0.5322207", "0.53200454", "0.53171265", "0.5307323", "0.52991325" ]
0.0
-1
Update information about a host from a Compute object
def _update_from_compute_node(self, compute_node):
    if (self.updated and compute_node.updated_at
            and self.updated > compute_node.updated_at):
        return
    self.uuid = compute_node.rp_uuid
    self.mem_available = compute_node.mem_available
    self.mem_total = compute_node.mem_total
    self.mem_free = compute_node.mem_free
    self.mem_used = compute_node.mem_used
    self.cpus = compute_node.cpus
    self.cpu_used = compute_node.cpu_used
    self.disk_total = compute_node.disk_total
    self.disk_used = compute_node.disk_used
    self.numa_topology = compute_node.numa_topology
    self.labels = compute_node.labels
    self.pci_stats = pci_stats.PciDeviceStats(
        stats=compute_node.pci_device_pools)
    self.disk_quota_supported = compute_node.disk_quota_supported
    self.runtimes = compute_node.runtimes
    self.enable_cpu_pinning = compute_node.enable_cpu_pinning
    self.updated = compute_node.updated_at
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self, host):\n pass", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def update(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n return host.update(**kwargs)", "def update(self, host, values):\n body = dict(host=values)\n return self._update(\"/os-hosts/%s\" % host, body, response_key='host')", "def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)", "def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)", "def collect_compute_info(self, ctxt, host_id, host_info):\n logger.info(\"Info of \" + host_id + \" :\" + str(host_info))\n self._compute_node_info.add_node_info(host_id, host_info)", "def update(call: ServiceCall) -> None:\n called_host = call.data[ATTR_HOST]\n if called_host in hass.data[DOMAIN]:\n hass.data[DOMAIN][called_host].update()\n else:\n for iperf3_host in hass.data[DOMAIN].values():\n iperf3_host.update()", "def update(self, host_id, values):\n if not values:\n return _('No values to update passed.')\n return self._update('/os-hosts/%s' % host_id, values,\n response_key='host')", "def collect_compute_info(self, ctxt, host_id, host_info):\n cctxt = self.client.prepare(server=DEFAULT_SERVER, timeout=RPC_TIMEOUT)\n cctxt.cast(ctxt, \"collect_compute_info\", host_id=host_id, host_info=host_info)", "def test_update_hyperflex_cluster(self):\n pass", "def poll_host(self, server, obj, name):\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def fusion_api_update_hypervisor_host_profile(self, uri=None, body=None, api=None, headers=None):\n return self.host_profile.update(body, uri, api, headers)", "def update_host_ovs(self, context):\n LOG.info(_('Updating Open vSwitch host data...'))\n LOG.debug(\"Current DOM: %s\" % self.current_dom.to_dict())\n LOG.debug(\"Requested DOM: %s\" % self.desired_dom.to_dict())\n\n builder = mob.MicroOperationBuilder(context,\n self.current_dom,\n self.desired_dom,\n self.rollback)\n\n mo_list = builder.get_micro_ops_for_update()\n\n # run validation\n return self._run_micro_op_list(mo_list)", "def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def host(self, host) :\n\t\ttry :\n\t\t\tself._host = host\n\t\texcept Exception as e:\n\t\t\traise e", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def update(self, compute_node=None, service=None):\n @utils.synchronized((self.hostname, compute_node))\n def _locked_update(self, compute_node, service):\n if compute_node is not None:\n LOG.debug('Update host state from compute node: %s',\n compute_node)\n self._update_from_compute_node(compute_node)\n if service is not None:\n LOG.debug('Update host state with service: %s', service)\n self.service = service\n\n return _locked_update(self, compute_node, service)", "def update_host_config(self, hostid, config, **kwargs):\n pass", "def update_resources_for_this_host(cache, db):\n free_cpu, free_mem = get_resources()\n my_ip = cache[\"ip\"]\n\n logger.info(\"UPDATING\", extra = {\"cpu\": free_cpu, \"mem\": free_mem, \"ip\": my_ip})\n try:\n db.hset(my_ip, mapping={\"cpu\": free_cpu, \"mem\": free_mem})\n except Exception as e:\n logger.error(e)\n raise e", "def host_update(self, host, ip_list, 
raw=True):\n\n endpoint = '/Domain/Host/Update'\n\n params = {\n 'Host' : host,\n 'IP_List' : \",\".join(ip_list)\n }\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'SUCCESS'", "def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True", "def compute_node_utilization_update(context, host, free_ram_mb_delta=0,\n free_disk_gb_delta=0, work_delta=0, vm_delta=0):\n session = get_session()\n compute_node = None\n with session.begin(subtransactions=True):\n compute_node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter(models.Service.host == host).\\\n filter_by(deleted=False).\\\n with_lockmode('update').\\\n first()\n if compute_node is None:\n raise exception.NotFound(_(\"No ComputeNode for %(host)s\") %\n locals())\n\n # This table thingy is how we get atomic UPDATE x = x + 1\n # semantics.\n table = models.ComputeNode.__table__\n if free_ram_mb_delta != 0:\n compute_node.free_ram_mb = table.c.free_ram_mb + free_ram_mb_delta\n if free_disk_gb_delta != 0:\n compute_node.free_disk_gb = (table.c.free_disk_gb +\n free_disk_gb_delta)\n if work_delta != 0:\n compute_node.current_workload = (table.c.current_workload +\n work_delta)\n if vm_delta != 0:\n compute_node.running_vms = table.c.running_vms + vm_delta\n return compute_node", "def updateHost(self, *hosts):\n localhost_name = None\n old_hostnames = []\n for old_host in self.hosts.values():\n old_hostnames.append(old_host.name)\n if isinstance(old_host, LocalHost):\n if localhost_name is not None:\n logger.warning('Duplicate localhost found in lab.hosts')\n localhost_name = old_host.name\n for new_host in hosts:\n # Updating localhost\n if (isinstance(new_host, LocalHost) and localhost_name is not None):\n # Check for localhost clash\n if new_host.name != localhost_name:\n logger.warning('Localhost is already present: ' +\n '%s\\n' +\n 'Not updating host %s!', localhost_name, new_host.name)\n continue\n else:\n localhost_name = new_host.name\n # Will an update happen?\n if new_host.name in old_hostnames:\n logger.info('Overwriting host: %s', new_host.name)\n # Will it end up removing the localhost?\n if (new_host.name == localhost_name and\n not isinstance(new_host, LocalHost)):\n localhost_name = None\n self.hosts[new_host.name] = new_host\n if localhost_name is None:\n logger.warning('Localhost not yet present')", "def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes", "def init_host(self, host):\n LOG.debug(\"init_host\")\n\n self._cpc = self._client.cpcs.find(**{\n \"object-id\": CONF.dpm.cpc_object_id})\n LOG.debug(\"Matching hypervisor found %(cpcsubset_name)s for object-id \"\n \"%(cpcid)s and CPC %(cpcname)s\" %\n {'cpcsubset_name': CONF.host,\n 'cpcid': CONF.dpm.cpc_object_id,\n 'cpcname': self._cpc.properties['name']})\n\n utils.valide_host_conf(self._cpc)\n self._host = Host.Host(self._cpc, self._client)", "def _vm_update_host(zx, vm, host, log=None):\n log = log or zx.log\n hostid = host['hostid']\n log(DEBUG, 'VM %s already defined in Zabbix as host ID \"%s\"', vm, hostid)\n params = zx.diff_vm_host(vm, host, log=log) # Issue #chili-311\n\n if params:\n log(WARNING, 'Zabbix host ID \"%s\" configuration differs from current VM %s configuration', hostid, vm)\n log(INFO, 'Updating Zabbix host ID \"%s\" according to VM 
%s with following parameters: %s',\n hostid, vm, params)\n\n if zx.update_host(hostid, log=log, **params):\n log(INFO, 'Updated Zabbix host ID \"%s\"', hostid)\n zx.save_host_info(vm, log=log)\n else:\n log(ERROR, 'Could not update Zabbix host ID \"%s\"', hostid)\n return False\n\n else: # Host in sync with VM\n log(INFO, 'Zabbix host ID \"%s\" configuration is synchronized with current VM %s configuration', hostid, vm)\n return True\n\n return True", "def share_resources_host_update(context, current_host, new_host):\n\n resources = {\n 'instances': models.ShareInstance,\n 'servers': models.ShareServer,\n 'groups': models.ShareGroup,\n }\n result = {}\n\n for res_name, res_model in resources.items():\n host_field = res_model.host\n query = model_query(\n context, res_model, read_deleted=\"no\",\n ).filter(host_field.like('{}%'.format(current_host)))\n count = query.update(\n {host_field: func.replace(host_field, current_host, new_host)},\n synchronize_session=False,\n )\n result.update({res_name: count})\n return result", "def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)", "def backend_info_update(context, host, value=None, delete_existing=False):\n info_ref = _backend_info_query(context, host)\n if info_ref:\n if value:\n info_ref.update({\"info_hash\": value})\n elif delete_existing and info_ref['deleted'] != 1:\n info_ref.update({\"deleted\": 1, \"deleted_at\": timeutils.utcnow()})\n else:\n info_ref = models.BackendInfo()\n info_ref.update({\"host\": host, \"info_hash\": value})\n info_ref.save(context.session)\n return info_ref", "def Host(self, h):\r\n\r\n self.host = h\r\n return self", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def host(self, host):\n\n self._host = host", "def set(self, host, **kwargs):\n for p, c in self.configs_:\n if host in c.hosts_:\n c.set(host, **kwargs)\n return\n raise ValueError(\"Host %s: not found\" % host)", "def operate_on_host_relation(self, function, host, type=None, state='offline', health=None):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n\n if function == 'INSERT':\n cur.execute(\"INSERT INTO host (host, type, state) VALUES (%s, %s, %s);\", (host, 
type, state,))\n elif function == 'UPDATE_STATE':\n cur.execute(\"UPDATE host SET state = %s WHERE host = %s;\", (state, host,))\n elif function == 'UPDATE_HEALTH':\n cur.execute(\"UPDATE host SET health = %s WHERE host = %s;\", (health, host,))\n elif function == 'DELETE':\n cur.execute(\"DELETE FROM host WHERE host = %s;\", (host,))\n else:\n print('NO FUNCTION FOUND')\n\n conn.commit()\n cur.close()\n except Exception as e:\n print(e)", "def host(self, host):\n for p, c in self.configs_:\n if host in c.hosts_:\n return c.host(host)\n return {}", "def test_update_hyperflex_cluster_profile(self):\n pass", "def _update(self, context, values, prune_stats=False):\n return db.compute_node_update(context, self.compute_node['id'],\n values, prune_stats)", "def mmo_cluster_hostInfo(self, mmo_connection, inc_mongos):\n return self.mmo_execute_on_cluster(mmo_connection, \"hostInfo\", inc_mongos)", "def restructure_host_cpu_data(host):\n init_cpu_counts(host)\n host.sockets = len(host.nodes or [])\n host.hyperthreading = False\n host.physical_cores = 0\n if not host.cpus:\n return\n host.cpu_model = host.cpus[0].cpu_model\n cpu_list = sorted(host.cpus, key=_sort_by_coreid)\n for cpu in cpu_list:\n inode = pecan.request.dbapi.inode_get(inode_id=cpu.forinodeid)\n cpu.numa_node = inode.numa_node\n if cpu.thread == 0:\n host.physical_cores += 1\n elif cpu.thread > 0:\n host.hyperthreading = True\n function = cpu.allocated_function or get_default_function(host)\n host.cpu_functions[cpu.numa_node][function].append(int(cpu.cpu))\n host.cpu_lists[cpu.numa_node].append(int(cpu.cpu))", "def compute_node_get_by_host(context, host):\n session = get_session()\n with session.begin():\n service = session.query(models.Service).\\\n filter_by(host=host, binary=\"monitor-bmc\").first()\n node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter_by(deleted=False,service_id=service.id)\n return node.first()", "def host(self, host: str, fields: str = None) -> dict:\n endpoint = f\"/api/host/{host}\" if host else \"/api/host/\"\n ret = self._request(\n endpoint=endpoint,\n params={\"fields\": fields} if fields else {},\n )\n return ret", "def update(self, target, query):\n node = self._data[target]\n name = \"%s node %.8s\" % (node['type'], target)\n\n query.update({\n 'type': node['type'],\n 'model': node['model']\n })\n\n logger.info(\"Validating query\")\n NodeValidator.validate(query)\n\n self._data[target] = dict_update(node, query, name)\n logger.info(\"Updated parameters above of %s\" % name)\n\n return {target: self._data[target]}", "def vm_update(args):\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % 
(name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_descrmation(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def dispatch_host(name, data):\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)", "def test_specific_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n compute1 = self.start_service('compute', host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='1')\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])", "def host_ip(self, host_ip):\n\n self._host_ip = host_ip", "def get_host_stats(self, refresh=False):", "def host(self, host):\n if host is None:\n raise ValueError(\"Invalid value for `host`, must not be `None`\")\n\n self._host = host", "def init_host(self, host):\n if self._drv_nodes is None:\n self.set_nodes([nova_conf.host])\n args = (drv_conf.tenant_id, drv_conf.client_id, drv_conf.client_secret,\n drv_conf.subscription_id)\n\n self.compute_client = utils.get_compute_client(*args)\n self.resource_client = utils.get_resource_client(*args)\n self.network_client = utils.get_network_client(*args)\n is_resource_created = utils.check_resource_existence(\n self.resource_client, drv_conf.resource_group)\n if not is_resource_created:\n utils.create_resource_group(\n self.resource_client, drv_conf.resource_group, drv_conf.region)\n\n self.flavor_info.update(\n utils.get_vm_sizes(self.compute_client, drv_conf.region))\n LOG.info(\"%s driver init with %s project, %s region\" %\n (self.name, drv_conf.tenant_id, drv_conf.region))", "def __init__(self, identity, fqdn, primary_ip, is_online, memory, storage,\n bandwidth, 
ip_addresses):\n super(Host, self).__init__(identity.name, identity.key, identity.hash,\n identity.vendor)\n self.fqdn = fqdn\n self.primary_ip = primary_ip\n self.is_online = is_online\n self.memory = memory\n self.storage = storage\n self.bandwidth = bandwidth\n self.ip_addresses = ip_addresses", "def sethost(self, host):\n self.__host = host", "def host_num(self, host_num):\n\n self._host_num = host_num", "def main():\n\n parser = cli.Parser()\n parser.add_required_arguments(cli.Argument.CLUSTER_NAME)\n parser.add_custom_argument('--key', required=True, action='store',\n help='Name of ESXi Advanced Setting to update')\n parser.add_custom_argument('--value', required=True, action='store',\n help='Value of the ESXi Advanced Setting to update')\n args = parser.get_args()\n try:\n si = service_instance.connect(args)\n\n content = si.RetrieveContent()\n\n cluster = pchelper.get_obj(content, [vim.ClusterComputeResource], args.cluster_name)\n\n hosts = cluster.host\n for host in hosts:\n option_manager = host.configManager.advancedOption\n option = vim.option.OptionValue(key=args.key,\n value=int(args.value))\n print(\"Updating %s on ESXi host %s \"\n \"with value of %s\" % (args.key, host.name, args.value))\n if option_manager.UpdateOptions(changedValue=[option]):\n print(\"Settings updated!\")\n\n except vmodl.MethodFault as ex:\n print(\"Caught vmodl fault : \" + ex.msg)\n return -1\n except Exception as ex:\n print(\"Caught exception : \" + str(ex))\n return -1\n\n return 0", "def host_info(vm_hostname):\n with _get_vm(vm_hostname) as vm:\n\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n info = vm.info()\n\n # Disconnect fabric now to avoid messages after the table\n disconnect_all()\n\n categories = (\n ('General', (\n 'hypervisor',\n 'status',\n )),\n ('Network', (\n 'intern_ip',\n 'mac_address',\n )),\n ('Resources', (\n 'num_cpu',\n 'max_cpus',\n 'memory',\n 'memory_free',\n 'max_mem',\n 'disk',\n 'disk_size_gib',\n 'disk_free_gib',\n )),\n # Anything else will appear in this section\n ('Other', None),\n )\n\n def _progress_bar(free_key, capacity_key, result_key, unit):\n \"\"\"Helper to show nice progress bars.\"\"\"\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )\n\n _progress_bar('memory_free', 'memory', 'memory', 'MiB')\n _progress_bar('disk_free_gib', 'disk_size_gib', 'disk', 'GiB')\n\n max_key_len = max(len(k) for k in info.keys())\n for category, keys in categories:\n # Handle 'Other' section by defaulting to all keys\n keys = list(keys or info.keys())\n\n # Any info available for 
the category?\n if not any(k in info for k in keys):\n continue\n\n print('')\n print(white(category, bold=True))\n for k in keys:\n if k not in info:\n continue\n\n # Properly re-indent multiline values\n value = str(info.pop(k))\n value = ('\\n' + ' ' * (max_key_len + 3)).join(\n value.splitlines()\n )\n print('{} : {}'.format(k.ljust(max_key_len), value))", "def test_rebuild_on_host_updated_target_node_not_found(self):\n def fake_get_compute_info(context, host):\n raise exception.ComputeHostNotFound(host=host)\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self.assertRaises(exception.InstanceFaultRollback,\n self._rebuild, expect_error=True)\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual('fake_host_2', instance['host'])\n self.assertEqual('fakenode2', instance['node'])\n mock_inst.assert_not_called()\n mock_get.assert_called_once_with(mock.ANY, self.compute.host)", "def process_host(desired_host):\n \n node_list = []\n host_info_list = [] \n if desired_host == \"all\":\n desired_host_list = getAllMachines()\n else:\n desired_host_list = (subprocess.getoutput(\"qconf -shgrp_resolved \" + '@' + str(desired_host))).split()\n qstat = subprocess.getoutput('qstat -f')\n for host in desired_host_list:\n if qstat.find(host) != (-1):\n #Searches the long string for the index of the occurance of the specified host, then\n #parses it the string for just that one line with the host that we want.\n host_info_list.append((qstat[qstat.find(host):].split('\\n'))[0])\n #Start at with everything at 0, and will count up as encountered.\n total_nodes = 0\n total_cores = 0\n disabled_cores = 0\n used_cores = 0\n free_cores = 0\n empty_nodes = 0\n disabled_nodes = 0\n for host in host_info_list:\n #simply gathering info qstat spat out for us\n temp_node = Node((host.split()[0]))\n cores = host.split()[2].replace('/', ' ').split()\n host_used_cores = cores[1]\n host_total_cores = cores[2]\n if len(host.split()) == 6 and (host.split()[5] == 'd' or host.split()[5] == 'E' or \\\n host.split()[5] == 'au' or host.split()[5] == 'Eau' or host.split()[5] == 'Eqw' \\\n or host.split()[5] == 'adu'):\n temp_node.set_disabled_switch(True)\n disabled_cores += int(host_total_cores)\n total_cores += int(host_total_cores)\n disabled_nodes += 1\n else: \n temp_node.set_disabled_switch(False)\n used_cores += int(host_used_cores)\n total_cores += int(host_total_cores)\n free_cores += int(host_total_cores) - int(host_used_cores)\n if int(host_used_cores) == 0:\n empty_nodes += 1\n temp_node.set_cores(host_total_cores, host_used_cores)\n total_nodes += 1\n node_list.append(temp_node) \n \n if len(sys.argv) == 3:\n if sys.argv[2] == '--details':\n print_detailed_host(total_cores, used_cores, total_nodes, empty_nodes, desired_host, \n disabled_cores, disabled_nodes, node_list)\n elif sys.argv[2] == '-v' or sys.argv[2] == '--visual':\n draw_queue(total_nodes, total_cores, used_cores, empty_nodes, desired_host, disabled_cores, \n disabled_nodes, node_list, free_cores)\n else:\n print('Error: Arg syntax error with: ' + sys.argv[2])\n show_usage(23)\n elif sys.argv[1] == \"-qlong\":\n # Returning values from this host group to the qlong function\n return(total_cores, used_cores, total_nodes, empty_nodes, disabled_cores,disabled_nodes, node_list)\n elif len(sys.argv) < 3:\n 
print_host_info(total_cores, used_cores, total_nodes, empty_nodes, desired_host, disabled_cores, \n disabled_nodes)\n else:\n print('Error: Too many args')\n show_usage(23)\n return", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def add_host(self, name, ip):\n rdataa = dns.rdata.from_text(dns.rdataclass.IN,dns.rdatatype.A,str(ip))\n rdataseta = dns.rdataset.from_rdata(300,rdataa)\n self.update.add(name,rdataseta)\n return dns.query.tcp(self.update,self.server_address)", "def connect_with_host_data(self, host: Host):\n host_obj = self.content.load_host(host.instanceId)\n\n if host_obj.connectionString:\n print_light_grey('Found host data, trying to connect...')\n\n # Has a bounce host.\n if host_obj.connectionString.bounce_host:\n bounce_host = DiscoverHost(self.account_obj, bounce=True).get_bounce()\n\n if not DoConnectAndSave(host_obj, self.account_obj).bounce_regular_connect(bounce_host):\n sys.exit(0)\n else:\n if not DoConnectAndSave(host_obj, self.account_obj).regular_connect():\n sys.exit(0)\n\n print_orange('Found host data is obsolete, trying to find a new path...')\n\n raise HostNotFound", "def init_host(self, host=socket.gethostname()):\n ctxt = context.get_admin_context()\n\n 
LOG.debug('Hostname: %s' % (host,))\n LOG.debug('Instances: %s' % (db.instance_get_all_by_host(ctxt, host)))\n \n for instance in db.instance_get_all_by_host(ctxt, host):\n try:\n LOG.debug('Checking state of %s' % instance['name'])\n state = self.get_info(instance['name'])['state']\n except exception.NotFound:\n state = power_state.SHUTOFF\n\n LOG.debug('Current state of %s was %s.' %\n (instance['name'], state))\n db.instance_set_state(ctxt, instance['id'], state)\n\n if state == power_state.SHUTOFF:\n db.instance_destroy(ctxt, instance['id'])\n\n if state != power_state.RUNNING:\n continue", "def update(self, update):\n\n params = shlex.split(update)\n if params[0] in self.addr:\n self.addr[params[0]].update(*params)\n\n else:\n a = Addr(self)\n # add both name and IP address\n self.addr[params[0]] = a\n self.addr[params[1]] = a\n a.update(*params)\n self.notify(\"addrmap_added\", *[a], **{})", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def rename(self, old_host, new_host):\n if new_host in self.hosts_:\n raise ValueError(\"Host %s: already exists.\" % new_host)\n for line in self.lines_: # update lines\n if line.host == old_host:\n line.host = new_host\n if line.key.lower() == \"host\":\n line.value = new_host\n line.line = \"Host %s\" % new_host\n self.hosts_.remove(old_host) # update host cache\n self.hosts_.add(new_host)", "def host(self, host: str):\n\n self._host = host", "def opencloud_fetch_host_info( hostname ):\n raise Exception(\"Opencloud support not implemented\")", "def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts", "def compute_node_update(context, compute_id, values, auto_adjust):\n session = get_session()\n if auto_adjust:\n _adjust_compute_node_values_for_utilization(context, values, session)\n with session.begin(subtransactions=True):\n values['updated_at'] = timeutils.utcnow()\n convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')\n compute_ref = compute_node_get(context, compute_id, session=session)\n for (key, value) in values.iteritems():\n compute_ref[key] = value\n compute_ref.save(session=session)", "def getHostInfo():", "def init_host(self, host):\n LOG.debug(\"init_host\")", "def update_field(self, field, value, harvesterid, harvesterhost):\n connection = self.connection\n query = \"\"\"UPDATE INSTANCES SET {0} = ? WHERE harvesterid = ? 
and harvesterhost = ?\"\"\".format(field)\n cur = connection.cursor()\n\n cur.execute(query, (value,\n harvesterid, harvesterhost))\n connection.commit()", "def test_update_hyperflex_node_profile(self):\n pass", "def get_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def compute_node_utilization_set(context, host, free_ram_mb=None,\n free_disk_gb=None, work=None, vms=None):\n session = get_session()\n compute_node = None\n with session.begin(subtransactions=True):\n compute_node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter(models.Service.host == host).\\\n filter_by(deleted=False).\\\n with_lockmode('update').\\\n first()\n if compute_node is None:\n raise exception.NotFound(_(\"No ComputeNode for %(host)s\") %\n locals())\n\n if free_ram_mb != None:\n compute_node.free_ram_mb = free_ram_mb\n if free_disk_gb != None:\n compute_node.free_disk_gb = free_disk_gb\n if work != None:\n compute_node.current_workload = work\n if vms != None:\n compute_node.running_vms = vms\n\n return compute_node", "def update(self, dbase):\n dbase.updateVirtualSpace(\n self.__id,\n self.__name,\n self.__host,\n self.__size\n )", "def send_node_props(self, host_info):\n se = get_se()\n version = get_version()\n name = host_info.get_hostname()\n unique_id = '%s:Pool:%s' % (se, name)\n parent_id = \"%s:SE:%s\" % (se, se)\n\n sa = StorageElement.StorageElement()\n sar = StorageElementRecord.StorageElementRecord()\n sa.UniqueID(unique_id)\n sa.Name(name)\n sa.SE(se)\n sa.SpaceType(\"Pool\")\n sa.Implementation(XRD_NAME)\n sa.Version(version)\n sa.Status(XRD_STATUS)\n sa.ParentID(parent_id)\n sa.Timestamp(timestamp)\n sar.Timestamp(timestamp)\n sar.UniqueID(unique_id)\n sar.MeasurementType(\"raw\")\n sar.StorageType(\"disk\")\n sar.TotalSpace(1024*host_info.get_total_kb())\n sar.FreeSpace(1024*host_info.get_total_free_kb())\n sar.UsedSpace(1024*host_info.get_total_used_kb())\n Gratia.Send(sa)\n Gratia.Send(sar)", "def test_update_host(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n library = get_library(device, \"libtests.so\")\n a = numpy.empty((4711 * 1024,), dtype=int)\n a_expect = numpy.empty_like(a)\n pattern = int(0xdeadbeefabbaabba)\n a_expect[:] = pattern\n offl_a = stream.bind(a)\n stream.invoke(library.test_set_pattern, offl_a, offl_a.size, pattern)\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((a == a_expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, a_expect))", "def _host_in_event(self, ev):\n self._update_nodes()\n\n if not self.nodes:\n return\n\n for node in self.nodes:\n if node.ip in ev.host.ipv4:\n datapath = self.dpset.get(ev.host.port.dpid)\n node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)\n self._install_cdnengine_matching_flow(datapath, node.ip, node.port)\n self.logger.info('New Node connected the network. 
Matching rules were installed ' + node.__str__())", "def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)", "def _create_compute_service(self, **kwargs):\n\n dic = {'binary': 'nova-compute', 'topic': 'compute',\n 'report_count': 0, 'availability_zone': 'dummyzone'}\n dic['host'] = kwargs.get('host', 'dummy')\n s_ref = db.service_create(self.context, dic)\n if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():\n t = utils.utcnow() - datetime.timedelta(0)\n dic['created_at'] = kwargs.get('created_at', t)\n dic['updated_at'] = kwargs.get('updated_at', t)\n db.service_update(self.context, s_ref['id'], dic)\n\n dic = {'service_id': s_ref['id'],\n 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,\n 'vcpus_used': 16, 'local_gb_used': 10,\n 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,\n 'cpu_info': ''}\n dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)\n dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')\n dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)\n db.compute_node_create(self.context, dic)\n return db.service_get(self.context, s_ref['id'])", "def worker(self, data):\n print(data)\n host_raw = self.connection.do_request('host.get', {\n 'filter': {'host': data[\"ext\"]},\n 'output': ['hostid']\n }).get(\"result\")\n # print(\"host_raw\", host_raw)\n if host_raw:\n host_id = host_raw[0].get(\"hostid\")\n\n else:\n host_new = self.connection.do_request('host.create', {\"host\" : f\"{data.get('ext')}\",\n \"templates\": [\n {\"templateid\" : self.template_id}\n ],\n \"groups\": [\n {\"groupid\": self.group_id}\n ]\n\n })\n\n host_id = host_new.get(\"result\").get(\"hostids\")[0]\n self.send_data(data)", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def update(args, config):\n print('Updates an HPC fleet with name \"{}\"'.format(args.fleet_name))", "def host_info(self, host):\n\n endpoint = '/Domain/Host/Info'\n\n params = {\n 'Host' : host,\n }\n \n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def show(self, req, id):\n ctxt = req.environ['nova.context']\n authorize(ctxt)\n try:\n LOG.info(\"List the info on nova-compute '%s'\" % id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n instances = dbapi.show_instances_on_host(ctxt, id)\n instances = [{'uuid': c.uuid,\n 'name': c.display_description,\n 'status': c.vm_state} for c in instances]\n compute_node = dbapi.compute_node_get_by_host(ctxt, id)\n total_ram = float(compute_node.memory_mb)\n used_ram = float(compute_node.memory_mb_used)\n percent = int(round((used_ram / total_ram) * 100))\n return {'host': {'name': id,\n 'percentUsed': percent,\n 'totalRAM': int(total_ram),\n 'usedRAM': int(used_ram),\n 'instances': instances}}\n except exception.ComputeHostNotFound:\n raise webob.exc.HTTPNotFound()", "def update(self):\n self.device = self._api.device_query(self._hardware_address, {})", "def update_optimizer(self, context, optimizer, host):\n pass", "def host(self, host):\n if host in 
self.hosts_:\n vals = defaultdict(list)\n for k, value in [(x.key.lower(), x.value) for x in self.lines_\n if x.host == host and x.key.lower() != \"host\"]:\n vals[k].append(value)\n flatten = lambda x: x[0] if len(x) == 1 else x\n return {k: flatten(v) for k, v in vals.items()}\n return {}", "def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]", "def post_instance_ip_update(self, resource_id, resource_dict):\n pass", "def update_host_configs(self, host_configs, **kwargs):\n for hostid, config in host_configs.items():\n self.update_host_config(hostid, config, **kwargs)", "def omc_conf_set(host_id, omc_fields, omc_config, user_name):\n global sqlalche_obj\n sqlalche_obj.sql_alchemy_db_connection_open()\n err1 = [0, 0]\n result = \"\"\n param = []\n resultarray = {}\n param.append('omcIpAddress.1')\n param.append('periodicStatsTimer.1')\n form_name = ['OMC IP address', 'Periodic Statistics Timer']\n dictarr = []\n device_param_list = sqlalche_obj.session.query(Hosts.snmp_version_id, Hosts.snmp_write_community, Hosts.ip_address, Hosts.snmp_port, Hosts.config_profile_id).\\\n filter(Hosts.host_id == host_id).all()\n odu16_omc_conf_table = sqlalche_obj.session.query(SetOdu16OmcConfTable).filter(\n SetOdu16OmcConfTable.config_profile_id == device_param_list[0][4]).all()\n result += str(odu16_omc_conf_table)\n for i in range(len(omc_fields)):\n omc_oid = oid_name[omc_fields[i]]\n omc_type = oid_type[omc_fields[i]]\n omc_type_val = omc_config[i]\n result += snmp_set(device_param_list[0][0], device_param_list[0][1], device_param_list[0][2], device_param_list[\n 0][3], omc_oid, omc_type, omc_type_val)\n err = error_odu16(result, param, err1)\n try:\n el = EventLog()\n # el.log_event( \"description detail\" , \"user_name\" )\n if 1 in err1:\n el.log_event(\n \"Values Updated in UBR UNMP Form\", \"%s\" % (user_name))\n if int(err1[0]) == 1:\n odu16_omc_conf_table[0].omc_ip_address = omc_config[0]\n if int(err1[1]) == 1:\n odu16_omc_conf_table[0].periodic_stats_timer = omc_config[1]\n sqlalche_obj.session.commit()\n sqlalche_obj.sql_alchemy_db_connection_close()\n for j in range(0, len(omc_fields)):\n dict = {}\n dict[\"name\"] = form_name[j]\n dict[\"value\"] = omc_config[j]\n dict[\"textbox\"] = omc_fields[j]\n dict[\"status\"] = err1[j]\n dictarr.append(dict)\n if err != '':\n raise Set_exception\n except Set_exception, e:\n resultarray[\"result\"] = dictarr\n resultarray[\"tableName\"] = 'SetOdu16OmcConfTable'\n resultarray['formAction'] = 'omc_config_form.py'\n sqlalche_obj.sql_alchemy_db_connection_close()\n return str(resultarray)", "def pre_instance_ip_update(self, resource_id, resource_dict):\n pass", "def put(self, *args, **kwargs):\n\n addr = EtherAddress(args[0])\n\n if 'desc' in kwargs:\n self.service.update(addr, kwargs['desc'])\n else:\n self.service.update(addr)" ]
[ "0.7401245", "0.7244819", "0.67560756", "0.6739064", "0.63846487", "0.63500065", "0.6329535", "0.62974095", "0.61992675", "0.6124308", "0.60235953", "0.6017592", "0.598866", "0.5938988", "0.59209824", "0.58763146", "0.58476484", "0.5826684", "0.5826684", "0.582578", "0.57337654", "0.57160884", "0.5690749", "0.5687323", "0.56857383", "0.5658935", "0.56173104", "0.56147546", "0.5600463", "0.5581531", "0.557739", "0.55748564", "0.55512094", "0.5542867", "0.5542867", "0.5542867", "0.5542867", "0.5525084", "0.55065143", "0.5483996", "0.54727817", "0.5470068", "0.5469677", "0.5454105", "0.54402107", "0.54373765", "0.5428247", "0.542774", "0.5402316", "0.5385924", "0.5381184", "0.5373285", "0.53610146", "0.53395617", "0.5334809", "0.53229064", "0.5322357", "0.53137046", "0.5290569", "0.52796537", "0.52620506", "0.52549386", "0.5243349", "0.52406996", "0.5230648", "0.52259463", "0.5219592", "0.5218948", "0.52030194", "0.5196456", "0.51925373", "0.5191728", "0.5189801", "0.5183129", "0.5150318", "0.51501495", "0.5147496", "0.51413184", "0.51355195", "0.51344085", "0.5114421", "0.51141876", "0.50867766", "0.5083709", "0.5075764", "0.5052138", "0.50505656", "0.5040798", "0.50350046", "0.50300395", "0.5026408", "0.50247854", "0.5014525", "0.50049925", "0.50022626", "0.50003725", "0.49782708", "0.4969402", "0.49630576", "0.49602008" ]
0.5865002
16
Incrementally update host state from a Container object.
def consume_from_request(self, container): @utils.synchronized(self._lock_name) @set_update_time_on_success def _locked(self, container): # Scheduler API is inherently multi-threaded as every incoming RPC # message will be dispatched in its own green thread. So the # shared host state should be consumed in a consistent way to make # sure its data is valid under concurrent write operations. self._locked_consume_from_request(container) return _locked(self, container)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _increment(state: Dict, host: str = None) -> Dict:\n logger.info(f'view-count@{host}: {state[host] + 1}')\n\n return {\n **state,\n host: state[host] + 1\n }", "def _inc_counter(self) -> None:\n self._state_storage.increment_counter()", "def _update_container(self):\n client = docker.from_env()\n self.container = client.containers.get(self.session.container_id)", "def _update(self, host):\n pass", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes", "def update(self, container, representation):\n pass", "def update(self):\n stats = self._thread.stats()\n if self._var_id == CONTAINER_MONITOR_STATUS:\n self._state = stats.get('status', None)\n elif self._var_id == CONTAINER_MONITOR_MEMORY_USAGE:\n self._state = stats.get('memory_usage', None)\n elif self._var_id == CONTAINER_MONITOR_CPU_PERCENTAGE:\n self._state = stats.get('cpu_percent', None)\n if 'cpu' in stats:\n self._attributes[ATTR_ONLINE_CPUS] = stats['cpu'].get('online_cpus', None)\n elif self._var_id == CONTAINER_MONITOR_MEMORY_PERCENTAGE:\n self._state = stats.get('memory_percent', None)\n # Network\n elif self._var_id == CONTAINER_MONITOR_NETWORK_UP:\n self._state = round(stats.get('network_up', None) / 1024.0, PRECISION)\n elif self._var_id == CONTAINER_MONITOR_NETWORK_DOWN:\n self._state = round(stats.get('network_down', None) / 1024.0, PRECISION)", "def host_up(self, host):\n with self.cond:\n if self.state is not None:\n LOG.warning(\"host_up called, but we think host is already up\")\n self._host_down()\n\n # Wait until all operations using a previous state generation are\n # complete before initialising a new one. Note that self.state is\n # already None, set either by initialisation or by host_down. 
This\n # means the current state will not be returned to any new callers,\n # and use_count will eventually reach zero.\n # We do this to avoid a race between _HostMountState initialisation\n # and an on-going mount/unmount operation\n while self.use_count != 0:\n self.cond.wait()\n\n # Another thread might have initialised state while we were\n # wait()ing\n if self.state is None:\n LOG.debug('Initialising _HostMountState generation %(gen)i',\n {'gen': self.generation})\n self.state = _HostMountState(host, self.generation)\n self.generation += 1", "def test_update_container(self):\n pass", "def container_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n self.client = docker.from_env()\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n try:\n containers = self.client.containers.list(all, **kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n Rest.delete('Container', filter)\n continue\n if len(containers) == 0:\n print(\"No containers exist \" + str(host['Ip']))\n Rest.delete('Container', filter)\n continue\n\n for containerm in containers:\n container = containerm.__dict__['attrs']\n container['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(container)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = container['Id']\n d['Name'] = container['Name']\n d['Image'] = container['Config']['Image']\n d['Status'] = container['State']['Status']\n d['StartedAt'] = container['State']['StartedAt']\n e[n] = d\n n = n + 1\n Rest.delete('Container', filter)\n Rest.post('Container', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Image', 'Status', 'StartedAt'])))", "def update(self):\n self._state = 23", "def inc(self):\n self._value += 1", "def inc(self):\n \n self.count += 1", "def increment(self):\n self._deltas += 1", "def inc( self ):\n self.count += 1", "def incInstCount(self):\n self.instCount += 1", "def _increment_state(self, increment):\n self._read_state[StateKey.POSITION] += increment", "def increment_counter(self) -> None:", "def update(self):\n self._state = self._state", "def increase_counter(self):\n self.values = self.values + 1", "def increment(cls, value):\r\n value.value += 1", "def update_instigator_state(self, state: InstigatorState):", "def update_status(self, command_dict):\n # just save it as Node redis entity\n Node[self.node.name] = self.node", "def _host_in_event(self, ev):\n self._update_nodes()\n\n if not self.nodes:\n return\n\n for node in self.nodes:\n if node.ip in ev.host.ipv4:\n datapath = self.dpset.get(ev.host.port.dpid)\n node.setPortInformation(ev.host.port.dpid, datapath, ev.host.port.port_no, ev.host.port)\n self._install_cdnengine_matching_flow(datapath, node.ip, node.port)\n self.logger.info('New Node connected the network. 
Matching rules were installed ' + node.__str__())", "def insert_host_states(hosts):\n IMPL.insert_host_states(hosts)", "def remote_update(self, increment):\r\n\r\n self.window += increment", "def update(self, state):\n self.states.append(state)", "def visit_container(self, container):\n self._connect_items(container)", "def at_added(self, host):\n\n if self.host:\n if self.host == host:\n return\n else:\n raise ComponentRegisterError(\"Components must not register twice!\")\n\n self.host = host", "def increment(self):\n self.data[self.pointer] += 1\n self.data[self.pointer] %= 256", "def update(self):\r\n self._state = self._dev.state", "def host_num_in(self, host_num_in):\n\n self._host_num_in = host_num_in", "def inc_pc(self, size):\n current_pc = self.get_register('PC')\n self.set_pc(current_pc + size)", "def update_counter(self, counter, entity):", "def update(self, value):\n if value < self.min:\n self.min = value\n if value > self.max:\n self.max = value\n self.total += value\n self.instances += 1\n self.values.append(value)", "def update(self, **kwargs):\n return self.client.api.update_container(self.id, **kwargs)", "def __update_container(self, path, obj_stat):\n try:\n self.logger.debug('Update container interface called')\n return self.asyn_helper.call \\\n (\"update_container\", path, obj_stat)\n except Exception as err:\n self.logger.error(('update_container for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def increment_node_index(self):\n self.node_index += 1", "def update_alive_status(self):\n self.alive = self.health > 0", "def wait_for_container(self):\n i = 0\n while True:\n ip_address = self.btcd_container.attrs[\"NetworkSettings\"][\"IPAddress\"]\n if ip_address.startswith(\"172\"):\n self.rpcconn.ipaddress = ip_address\n break\n self.btcd_container.reload()\n time.sleep(0.5)\n i = i + 1\n if i > 20:\n raise Exception(\"Timeout while starting bitcoind-docker-container!\")", "def increment_pc(self):\n self.program_counter[-1] += 1", "def increment(self):\r\n return self.add(1)", "def updateVisits(self):\n self.nVisits += 1", "def update(self, status):\n\n for name, c in self.children.items():\n c.update(status.child(name))", "def update_gear_status(key, value):\n\n fw = context.client\n dest_container = fw.get(context.destination['id'])\n kwargs = {key: value}\n dest_container.update_info(kwargs)\n log.info(repr(kwargs))", "def update_gear_status(key, value):\n\n fw = context.client\n dest_container = fw.get(context.destination['id'])\n kwargs = {key: value}\n dest_container.update_info(kwargs)\n log.info(repr(kwargs))", "def test_integer_inplace_update(self):\r\n vm = Integer.value_manager(None, None, 5)\r\n assert not vm.changed\r\n vm.value += 1\r\n assert vm.changed", "def host_status_control(self, host_status_control):\n\n self._host_status_control = host_status_control", "def update_container():\n return exec_fn(_update_container)", "def container(self, container):\n if not container.is_public():\n container.make_public()\n self._container = container", "def test_update_hyperflex_cluster(self):\n pass", "def incr_registers(self):\n pass", "def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1", "def increment(cls):\n index = random.randint(0, SimpleCounterShard.NUM_SHARDS - 1)\n shard_name = 'shard' + str(index)\n counter = 
SimpleCounterShard.objects.get_or_create(pk=shard_name)[0]\n counter.count += 1\n counter.save()", "def update_cluster_hosts(self, hosts):\n self._hosts = hosts\n self._collect_hosts_d = True", "def start(self, container: Container):", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def toggle_container(self,new_bool):\n self.container = new_bool", "def put_container(self, account, container):\n \n pass", "def _register_container(self, container):\n found = False\n try:\n for host, location, container in Container.Container.host_generator(container,\n known_networks=self.networks.keys()):\n websocket = \"ws\" in host.scheme or \"wss\" in host.scheme\n secured = 'https' in host.scheme or 'wss' in host.scheme\n http = 'http' in host.scheme or 'https' in host.scheme\n # it might return string if there's a error in processing\n if type(host) is not str:\n if (host.hostname, host.port) in self.hosts:\n existing_host: Host = self.hosts[(host.hostname, host.port)]\n existing_host.add_container(location, container, websocket=websocket, http=http)\n ## if any of the containers in for the virtualHost require https, the all others will be redirected to https.\n if secured:\n existing_host.secured = True\n host = existing_host\n else:\n host.secured = secured\n host.add_container(location, container, websocket=websocket, http=http)\n self.hosts[(host.hostname, host.port)] = host\n\n if host.secured:\n if host.hostname not in self.ssl_certificates:\n host.ssl_expiry = self.ssl.expiry_time(host.hostname)\n else:\n host.ssl_expiry = self.ssl_certificates[host.hostname]\n if (host.ssl_expiry - datetime.datetime.now()).days > 2:\n self.ssl_certificates[host.hostname] = host.ssl_expiry\n\n found = True\n self.containers.add(container.id)\n\n except Container.NoHostConiguration:\n print(\"Skip Container:\", \"No VIRTUAL_HOST configuration\", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n except Container.UnreachableNetwork:\n print(\"Skip Container:\", \"UNREACHABLE Network \", \"Id:\" + container.id,\n \"Name:\" + container.attrs[\"Name\"].replace(\"/\", \"\"), sep=\"\\t\")\n return found", "def update(self):\n self._num_frames += 1", "def _handle_HostEvent (self, event):\n self.host_alive.append(event.entry) \n print type(event.entry).__name__", "def incr_logical_clock(self):\n self._logical_clock += 1", "def update_state(self, dstate):\n pass", "def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1", "def _reload_object_state(self, fake_object):\n bucket = self._get_bucket(fake_object._bucket_name)\n obj_state = bucket[\"objects\"].get(fake_object.key)\n if obj_state is None:\n fake_object._current_data[\"data\"] = None\n return\n fake_object._current_data[\"data\"] = json.loads(obj_state[\"data\"])\n fake_object._current_data[\"content_type\"] = obj_state[\"content_type\"]\n fake_object._current_data[\"indexes\"] = set()\n for index_name, index_data in bucket[\"indexes\"].iteritems():\n for index_value, key in index_data:\n if key == fake_object.key:\n fake_object._current_data[\"indexes\"].add(\n (index_name, index_value))", "def update_count(self):\n pass", "def update(call: ServiceCall) -> None:\n called_host = call.data[ATTR_HOST]\n if called_host in hass.data[DOMAIN]:\n hass.data[DOMAIN][called_host].update()\n else:\n for iperf3_host in hass.data[DOMAIN].values():\n iperf3_host.update()", "def container(self, container):\n\n 
self._container = container", "def container(self, container):\n\n self._container = container", "def update_state(self, context):\n pass", "def update(self):\n self._state = get_local_ip()", "def increment(self) -> None:\n self._increment_called = True\n self.append(deepcopy(self._base_metric))", "def set_incremental_state(module, incremental_state, key, value):\n if incremental_state is not None:\n full_key = _get_full_incremental_state_key(module, key)\n incremental_state[full_key] = value", "def reload(self):\n self.containers = list(filter(_check_alive_container, self.containers))", "def register_this_container(cache, db):\n\n # Get container id.\n # bash_command = \"\"\"head -1 /proc/self/cgroup|cut -d/ -f3\"\"\"\n # output = str(subprocess.check_output(['bash','-c', bash_command]), \"utf-8\").strip()\n\n # logger.info(output)\n\n my_host_name = socket.gethostname()\n my_ip = socket.gethostbyname(my_host_name)\n cache[\"ip\"] = my_ip\n cache[\"host\"] = my_host_name\n\n free_cpu, free_mem = get_resources()\n\n logger.info({\"host_name\": my_host_name, \"ip\": my_ip})\n try:\n pipe = db.pipeline()\n pipe.sadd(SET_NAME, my_ip).hset(my_ip, mapping={\"host_id\": my_host_name, \"cpu\": free_cpu, \"mem\": free_mem})\n pipe.execute()\n except Exception as e:\n logger.error(e)\n raise e", "def inc(self, amount=1):\n if amount < 0:\n raise ValueError('Counters can only be incremented by non-negative amounts.')\n self._shared_list.append((self._labels_args, ('inc', amount)))", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "async def increment(self):\n async with self.lock:\n self.counter += 1", "def put_on_stack(self, item, stack_to_refresh):\n self.container.append(item)\n stack_to_refresh.changed_last = False\n self.changed_last = True", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def esxi_host_count(self, esxi_host_count):\n\n self._esxi_host_count = esxi_host_count", "def nx_to_redis_(self, client_address, name):\n self.client_to_redis(client_address)\n k = '{};{};nx'.format(client_address, name)\n self.redis.incr(k)\n self.redis.expire(k, TTL_GRACE)\n return", "def _handle_coordinator_update(self) -> None:\n self._attr_is_on = self.relay.active\n self.async_write_ha_state()", "def increment(self, amount):\n pass", "def init_host(self, host=socket.gethostname()):\n ctxt = context.get_admin_context()\n\n LOG.debug('Hostname: %s' % (host,))\n LOG.debug('Instances: %s' % (db.instance_get_all_by_host(ctxt, host)))\n \n for instance in db.instance_get_all_by_host(ctxt, host):\n try:\n LOG.debug('Checking state of %s' % instance['name'])\n state = self.get_info(instance['name'])['state']\n except exception.NotFound:\n state = power_state.SHUTOFF\n\n LOG.debug('Current state of %s was %s.' 
%\n (instance['name'], state))\n db.instance_set_state(ctxt, instance['id'], state)\n\n if state == power_state.SHUTOFF:\n db.instance_destroy(ctxt, instance['id'])\n\n if state != power_state.RUNNING:\n continue", "def update_state(self):\n if self._coordinator.data:\n # get consumption value\n value_list = self._coordinator.data['values']\n values = [v['value'] for v in value_list]\n self._state = f\"{sum(values):.2f}\"", "def update_state(self):\n\n # Start off assuming no space in the queues and no pointer to a\n # shortest queue.\n self.min_queue = None\n self.has_space_in_a_server_queue = False\n self.queue_size = 0\n self.online_server_count = 0\n\n # Loop through all the servers.\n for server in self.server_list:\n\n # If server is online....\n if server.online is True:\n\n # Increment count of online servers\n self.online_server_count += 1\n\n # If any server has space...\n if len(server.queue) < server.max_queue_size:\n\n # 'Has Space' is True and remains true.\n if self.has_space_in_a_server_queue is False:\n self.has_space_in_a_server_queue = True\n\n # First non-full server we come to.\n if self.min_queue is None:\n self.min_queue = server\n\n # If we already had a non-full queue in hand,\n # compare it to the present one.\n elif len(server.queue) < len(self.min_queue.queue):\n self.min_queue = server\n\n # Increment the count of the parallel server block.\n self.queue_size += len(server.queue)", "def post_group_build(self, host, containers, task):\n formation = FormationIntrospector(host, self.app.containers).introspect()\n containers_to_restart = set()\n for container in containers:\n if container.system:\n # If the running instance is based on an outdated image, restart it\n try:\n instance = formation.get_container_instance(container.name)\n except ValueError:\n continue\n image_details = host.client.inspect_image(container.image_name)\n if instance and image_details and image_details[\"Id\"] != instance.image_id:\n containers_to_restart.add(container)\n if containers_to_restart:\n self.app.invoke(\"restart\", containers=containers_to_restart)", "def makeBox(self) -> None:\n self.state[CASH] = self.state[CASH] + 1", "def container_status_change(self, status=None, containerName=None, kwargs=None):\n if status is None:\n Console.info(\"No status specified\")\n return\n\n try:\n container = self.client.containers.get(containerName)\n # need to check this ..\n if status is \"start\":\n container.start(**kwargs)\n elif status is \"pause\":\n container.pause(**kwargs)\n elif status is \"unpause\":\n container.unpause(**kwargs)\n elif status is \"stop\":\n container.stop(**kwargs)\n else:\n Console.error('Invalid Commmand')\n return\n\n container = self.client.containers.get(containerName)\n filter = {}\n container_dict = container.__dict__['attrs']\n filter['Id'] = container_dict['Id']\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n container_dict['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n Rest.post('Container', container_dict, filter)\n Console.ok('Container ' + container.name + ' status changed to ' + status)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return", "def incrementTimers(self):\n # online servers\n for server in self.online_servers:\n self.online_servers[server][0] += 1\n # offline servers\n for server in self.offline_servers:\n self.offline_servers[server][0] += 1\n \n return", "def set_state(self, state):\n #print(\"ComponentBase.set_state\")\n for k,v in state.items():\n #print(\" Set {:14s} to 
{:s}\".format(k,str(v)))\n if k == \"connectors\":\n for con_state in v:\n self.add_connector() \n self.connectors[-1].set_state(con_state)\n else:\n setattr(self, k, v)", "def update(self):\n\n self._state = get_balance(self.addresses)", "def _increment_state(self, bytes_read):\n self._read_state[StateKey.POSITION] += bytes_read", "def increment_register_index(self) -> None:\n self._parent_node.increment_register_index()", "def host_num(self, host_num):\n\n self._host_num = host_num", "def process(self, container):\n pass;", "def update_host_ovs(self, context):\n LOG.info(_('Updating Open vSwitch host data...'))\n LOG.debug(\"Current DOM: %s\" % self.current_dom.to_dict())\n LOG.debug(\"Requested DOM: %s\" % self.desired_dom.to_dict())\n\n builder = mob.MicroOperationBuilder(context,\n self.current_dom,\n self.desired_dom,\n self.rollback)\n\n mo_list = builder.get_micro_ops_for_update()\n\n # run validation\n return self._run_micro_op_list(mo_list)", "def compute_container_lines(self):\n for order in self:\n order.containers_count = len(order.container_line_ids.mapped('container_id'))" ]
[ "0.6660293", "0.6045445", "0.561173", "0.5611655", "0.5515509", "0.54336005", "0.5417022", "0.5400421", "0.53642195", "0.53447664", "0.5336348", "0.5238454", "0.5179859", "0.5147485", "0.5103677", "0.5096942", "0.50933737", "0.50852305", "0.50512236", "0.50192106", "0.4984766", "0.4969593", "0.49625477", "0.49305606", "0.49217126", "0.49146575", "0.487262", "0.48463398", "0.48402005", "0.4835365", "0.4827958", "0.48262036", "0.4816843", "0.481468", "0.47968006", "0.47779968", "0.47706252", "0.4767408", "0.47622952", "0.47622445", "0.47460163", "0.47363734", "0.4727968", "0.47274956", "0.47155997", "0.47049886", "0.47049886", "0.46895126", "0.46810952", "0.46593913", "0.4651446", "0.4641192", "0.46394998", "0.46346968", "0.4630553", "0.4624764", "0.46187976", "0.46116996", "0.46069956", "0.45976007", "0.45902306", "0.45810473", "0.45784035", "0.45763257", "0.45740953", "0.45563883", "0.45547166", "0.4553284", "0.45528394", "0.45512795", "0.45512795", "0.4537954", "0.4537488", "0.4530428", "0.4526455", "0.45246163", "0.45191848", "0.45189747", "0.4514839", "0.4513535", "0.45130703", "0.45098683", "0.45028168", "0.45021984", "0.45014262", "0.4497786", "0.4493969", "0.44907033", "0.44905612", "0.44821653", "0.44810173", "0.4479246", "0.44718236", "0.446372", "0.44566897", "0.4456413", "0.44463915", "0.44411764", "0.44399995", "0.4436047", "0.44339642" ]
0.0
-1
Set updated time of HostState when consuming succeeds.
def set_update_time_on_success(function): @functools.wraps(function) def decorated_function(self, container): return_value = None try: return_value = function(self, container) except Exception as e: # Ignores exception raised from consume_from_request() so that # booting container would fail in the resource claim of compute # node, other suitable node may be chosen during scheduling retry. LOG.warning("Selected host: %(host)s failed to consume from " "container. Error: %(error)s", {'host': self.hostname, 'error': e}) else: self.updated = timeutils.utcnow() return return_value return decorated_function
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def host_up(self, host):\n with self.cond:\n if self.state is not None:\n LOG.warning(\"host_up called, but we think host is already up\")\n self._host_down()\n\n # Wait until all operations using a previous state generation are\n # complete before initialising a new one. Note that self.state is\n # already None, set either by initialisation or by host_down. This\n # means the current state will not be returned to any new callers,\n # and use_count will eventually reach zero.\n # We do this to avoid a race between _HostMountState initialisation\n # and an on-going mount/unmount operation\n while self.use_count != 0:\n self.cond.wait()\n\n # Another thread might have initialised state while we were\n # wait()ing\n if self.state is None:\n LOG.debug('Initialising _HostMountState generation %(gen)i',\n {'gen': self.generation})\n self.state = _HostMountState(host, self.generation)\n self.generation += 1", "def _update(self, host):\n pass", "def update_isolation(self, time: int):", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def _delay_update(now):\n _LOGGER.debug(\n \"%s Called delayed (%ssec) update\", self._name, self._delay\n )\n self.schedule_update_ha_state()\n self._timer = None", "def _idle(self):\n # self._purge_timedout()\n # ...", "def state_sys_time(self, state_sys_time):\n self._state_sys_time = state_sys_time", "def _mark_fresh(self):\n if self._is_stale:\n self._logger.debug(\"%s: transition to fresh\", self.ping_address)\n self.on_fresh()\n self._is_stale = False", "def trigger(self):\n # Update current state.\n self.current_inst = self.future_inst\n self.current_timer = self.future_timer\n # Initialize future state.\n self.future_inst = self.current_inst\n self.future_timer = max(0, self.current_timer - 1)", "def test_update_with_target_state(self):\n self.switch._target_state = True\n self.port.data = {}\n self.port.data[\"output\"] = \"stale\"\n self.switch.update()\n assert 1.0 == self.port.data[\"output\"]\n assert self.switch._target_state is None\n self.port.data[\"output\"] = \"untouched\"\n self.switch.update()\n assert \"untouched\" == self.port.data[\"output\"]", "def pre_step(self,status):\n self.t0 = time.time()\n pass", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def update_callback(self):\n self.schedule_update_ha_state(True)", "def set_to_slow(self):\n self.set_remote_status(1)\n logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')\n self._execute('S1')\n self.set_remote_status(3)", "def update(self):\n self._state = 23", "def _update_callback(self) -> None:\n self.async_schedule_update_ha_state(force_refresh=True)", "def _update_callback(self) -> None:\n self.async_schedule_update_ha_state(force_refresh=True)", "def update(self, dt):\n\n self.state_stack.update(dt)\n if self.state_stack.peek().quit:\n self.finish = True", "async def wait_for_state(self):\n await self.state_got.wait()\n assert self.time_step == self.rl_agent.current_round\n self.state_got.clear()", "def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not 
bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False", "def _set_timed_state(self, state_attribute_name, start_time_attribute_name, execution_time_attribute_name,\n new_state):\n current_state = getattr(self, state_attribute_name)\n if current_state == STATE_NOT_STARTED and new_state == STATE_RUNNING:\n setattr(self, start_time_attribute_name, datetime.utcnow())\n\n if current_state == STATE_RUNNING and new_state in [STATE_COMPLETE, STATE_FAILED]:\n execution_time = datetime.utcnow() - getattr(self, start_time_attribute_name)\n execution_time = (execution_time.days * 3600 * 24) + \\\n execution_time.seconds\n setattr(self, execution_time_attribute_name, execution_time)\n\n setattr(self, state_attribute_name, new_state)", "def update_timeval(self):\n self.timeval = self.get_timeval()", "def update(self):\n super().update()\n self.checkTimeToLive()", "async def after_update_callback(self, _: XknxConnectionState) -> None:\n self.async_write_ha_state()", "def suspend(host=None,time=10):\r\n if host:\r\n host.suspend(time)", "def _on_state_update(self) -> None:\n super()._on_state_update()\n self._set_futures(True)", "def update_time(self):\n pass # Do nothing", "async def async_update(self) -> None:\n if not self.last_seen:\n return\n if self.location_name:\n self._state = self.location_name\n elif self.gps is not None and self.source_type == SourceType.GPS:\n zone_state = zone.async_active_zone(\n self.hass, self.gps[0], self.gps[1], self.gps_accuracy\n )\n if zone_state is None:\n self._state = STATE_NOT_HOME\n elif zone_state.entity_id == zone.ENTITY_ID_HOME:\n self._state = STATE_HOME\n else:\n self._state = zone_state.name\n elif self.stale():\n self.mark_stale()\n else:\n self._state = STATE_HOME\n self.last_update_home = True", "def _on_band_timer(self):\n self._update_band_state()", "def async_update_state(self, state):\n _LOGGER.debug(\"state=%s\", state)\n self._state = state\n self.async_write_ha_state()", "def _update_handler(self, state):\n self._schedule_remaining_events()", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def run(self):\n while self._should_run and not self._done_check(self._state):\n try:\n state = self._state.update()\n # Catch any Exception but let any BaseException be raised\n except Exception as error:\n state = self._state.recover(error)\n self._state = state\n if state:\n time.sleep(float(self._state.update_period))", "def update_state(self, elapsed_time):\n if self.state == INITIAL_BUFFERING_STATE:\n# config_pytomo.LOG.debug('State: INITIAL_BUFFERING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n if (self.flv_timestamp > config_pytomo.INITIAL_BUFFER):\n self.state = PLAYING_STATE\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nStart\\n')\n self.start_playback = elapsed_time\n self.initial_data = self._total_bytes\n try:\n self.initial_rate = (self.initial_data * 8\n / self.current_time / 1000)\n except ZeroDivisionError:\n self.initial_rate = 0\n elif self.state == PLAYING_STATE:\n# config_pytomo.LOG.debug('State: PLAYING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n self.accumulated_playback = self.flv_timestamp\n video_playback_time = (self.current_time - self.start_playback -\n self.accumulated_buffer)\n #print (\"PLaying state\", self.flv_timestamp, video_playback_time,\n #self.accumulated_buffer)\n 
if ((self.flv_timestamp - video_playback_time)\n < config_pytomo.MIN_PLAYOUT_BUFFER):\n self.state = BUFFERING_STATE\n self.interruptions += 1\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nInterruption\\n')\n #import pdb; pdb.\n elif self.state == BUFFERING_STATE:\n# config_pytomo.LOG.debug('State: BUFFERING_STATE')\n# config_pytomo.LOG.debug('current_time=%s; Total_bytes=%s' %\n# (self.current_time, self._total_bytes))\n self.accumulated_buffer += elapsed_time\n video_playback_time = (self.current_time - self.start_playback -\n self.accumulated_buffer)\n #print \"BUFFERING_STATE \", self.flv_timestamp, video_playback_time\n if (self.flv_timestamp - video_playback_time\n > config_pytomo.MIN_PLAYOUT_RESTART):\n self.state = PLAYING_STATE\n if config_pytomo.DEMO:\n config_pytomo.LOG.info('\\n\\nRestart\\n')", "def time_updated(self, time_updated):\n self._time_updated = time_updated", "async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n if self._state is not None:\n return\n\n state = await self.async_get_last_state()\n self._state = state and state.state == STATE_ON", "def update(self):\n self._state = get_local_ip()", "def update(self, time=None):\n if self.realtime:\n return\n if time is None: # clock in externally-clocked mode, need valid time\n return\n self._time = time", "def _controller_state_timer_cb(self):\n self._controller_state_pub.publish(str(self.controller_state))\n self._behavior_profile_pub.publish(str(self.behavior_profile))\n self._phone_support_pub.publish(not self.phone_link.is_stealth())", "def run_out_of_time(self):\n self.out_of_time = True", "def synchronyze_board_and_host_time(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def async_defer_or_update_ha_state(self) -> None:\n if not self.hass.is_running:\n return\n\n self.async_update_group_state()\n self.async_write_ha_state()", "def state_changed(target, new_value, old_value, initiator):\n\n if (new_value == _WorkState.RUNNING and\n (old_value not in [_WorkState.RUNNING, _WorkState.PAUSED] or\n target.time_started == None)):\n target.time_started = datetime.utcnow()\n target.time_finished = None\n\n elif new_value in (_WorkState.DONE, _WorkState.FAILED):\n target.time_finished = datetime.utcnow()", "async def test_state_update(hass: HomeAssistant) -> None:\n await init_integration(hass)\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3200.0\"\n\n future = utcnow() + timedelta(minutes=60)\n\n current_condition = load_json_object_fixture(\n \"accuweather/current_conditions_data.json\"\n )\n current_condition[\"Ceiling\"][\"Metric\"][\"Value\"] = 3300\n\n with patch(\n \"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions\",\n return_value=current_condition,\n ), patch(\n \"homeassistant.components.accuweather.AccuWeather.requests_remaining\",\n new_callable=PropertyMock,\n return_value=10,\n ):\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.home_cloud_ceiling\")\n assert state\n assert state.state != STATE_UNAVAILABLE\n assert state.state == \"3300\"", "def update(self):\n self._state = status\n attributes['host'] = host\n attributes['port'] = port\n self.custom_attributes = attributes", "def update(self, current_time, *args):\n self.blockers = self.set_blockers()\n self.current_time = current_time\n state_function = 
self.state_dict[self.state]\n state_function()\n self.location = self.get_tile_location()", "def update_host_heartbeat(self, hostname: str) -> bool:\n with self.lock:\n try:\n host = Query()\n self.hosts.update({'latest_recv': datetime.now().strftime(self.time_format)},\n host.hostname.matches(hostname))\n return True\n except Exception as err:\n raise UpdateError('Cannot update latest_recv of host with hostname={}'.format(hostname), err)", "async def update_state(self, state: dict):\n self.last_update = current_time() * 1000\n self.last_position = state.get('position', 0)\n self.position_timestamp = state.get('time', 0)\n\n try:\n await self.update_title()\n except Exception: # I don't want the task to finish because of a stupid thing\n pass\n\n event = PlayerUpdateEvent(\n self, self.last_position, self.position_timestamp)\n await self.node._dispatch_event(event)", "async def async_added_to_hass(self) -> None:\n await self.async_base_added_to_hass()\n state = await self.async_get_last_state()\n if state and state.attributes.get(ATTR_TEMPERATURE):\n self._attr_target_temperature = float(state.attributes[ATTR_TEMPERATURE])", "def test_timeout_loop(self):\n\n self.assertIsNone(self.state.becomeCandidateTimeout)\n # This should do nothing\n self.state.cancelBecomeCandidateTimeout()\n self.assertIsNone(self.state.becomeCandidateTimeout)\n\n results = self.state.begin()\n\n self.state.resetElectionTimeout()\n self.assertTrue(self.state.electionTimeout > 0.150)\n self.assertTrue(self.state.electionTimeout < 0.350)\n self.assertTrue(isinstance(self.state.becomeCandidateTimeout,\n base.DelayedCall))", "async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n if not state:\n return\n self._state = state.state == \"on\"", "def update(self, t):\n self.state.send(t)", "def update(self):\r\n self._state = self._dev.state", "async def async_update(self):\n await self.hass.async_add_job(self.hdc1000_client.update)\n if self.type == SENSOR_TEMP:\n temperature = round(self.hdc1000_client.temperature, 1)\n if self.temp_unit == TEMP_FAHRENHEIT:\n temperature = round(celsius_to_fahrenheit(temperature), 1)\n self._state = temperature\n elif self.type == SENSOR_HUMID:\n self._state = round(self.hdc1000_client.humidity, 1)", "def set_status_update_waiter_shutdown(self):\n self.set_state(CHANNEL_MOVE_STATE_CLIENT_SHUTDOWN)\n self.set_status_update_waiter()", "def set_lock_time():\n\n pass", "def testSettled(self):\n self.injectEvent(safe.Settling.SETTLED)\n self.assertCurrentState(safe.Grabbing)", "def init_host(self, host=socket.gethostname()):\n ctxt = context.get_admin_context()\n\n LOG.debug('Hostname: %s' % (host,))\n LOG.debug('Instances: %s' % (db.instance_get_all_by_host(ctxt, host)))\n \n for instance in db.instance_get_all_by_host(ctxt, host):\n try:\n LOG.debug('Checking state of %s' % instance['name'])\n state = self.get_info(instance['name'])['state']\n except exception.NotFound:\n state = power_state.SHUTOFF\n\n LOG.debug('Current state of %s was %s.' 
%\n (instance['name'], state))\n db.instance_set_state(ctxt, instance['id'], state)\n\n if state == power_state.SHUTOFF:\n db.instance_destroy(ctxt, instance['id'])\n\n if state != power_state.RUNNING:\n continue", "def _status(self, host):\n pass", "def _ping(self):\n\n self.last_ping = time.time()\n try:\n logger.debug(\"(%s) PING\", self.device[\"ip\"])\n _send_request(self.device, tf.HEART_BEAT)\n except socket.error:\n self.force_reconnect = True", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def state_wait_enter(cfg, app, win):", "def _handle_coordinator_update(self) -> None:\n self._thermostat = self.coordinator.data[self._thermostat.serial_number]\n self.async_write_ha_state()", "def __setstate__(self, state):\n self.__dict__ = state\n self.get_esoh_solver = lru_cache()(self._get_esoh_solver)", "def time_thread(self):\n while self.time > 0:\n t.sleep(1)\n self.time -= 1\n self.end_round(\"Time is up\")", "def update(self):\n self._state = self._state", "def update(self,dt):\n #print self._state\n if self._state == STATE_INACTIVE:\n self._inactive()\n elif self._state == STATE_COUNTDOWN:\n self._countdown()\n elif self._state == STATE_PAUSED:\n self._paused()\n elif self._state == STATE_ACTIVE:\n self._active()\n elif self._state == STATE_RESET:\n self._reset()\n elif self._state == STATE_COMPLETE:\n self._complete()", "def _async_update_humidity(self, state):\n try:\n self.ccs811_client.set_humidity(float(state.state))\n except ValueError as ex:\n _LOGGER.error(\"Unable to update from sensor: %s\", ex)", "def set_power_state(self, task, pstate):\n _set_and_wait(task, pstate)", "def set_to_fast(self):\n self.set_remote_status(1)\n logging.info(__name__ + ' : Setting Helium Probe in FAST rate')\n self._execute('T1')\n self.set_remote_status(3)", "def host_status_control(self, host_status_control):\n\n self._host_status_control = host_status_control", "def __enter__(self):\n self.start = timeit.default_timer()", "async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()", "async def async_update(self) -> None:\n if (\n self.device.appliance.status.get(BSH_POWER_STATE, {}).get(ATTR_VALUE)\n == BSH_POWER_ON\n ):\n self._state = True\n elif (\n self.device.appliance.status.get(BSH_POWER_STATE, {}).get(ATTR_VALUE)\n == self.device.power_off_state\n ):\n self._state = False\n elif self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get(\n ATTR_VALUE, None\n ) in [\n \"BSH.Common.EnumType.OperationState.Ready\",\n \"BSH.Common.EnumType.OperationState.DelayedStart\",\n \"BSH.Common.EnumType.OperationState.Run\",\n \"BSH.Common.EnumType.OperationState.Pause\",\n \"BSH.Common.EnumType.OperationState.ActionRequired\",\n \"BSH.Common.EnumType.OperationState.Aborting\",\n \"BSH.Common.EnumType.OperationState.Finished\",\n ]:\n self._state = True\n elif (\n self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get(ATTR_VALUE)\n == \"BSH.Common.EnumType.OperationState.Inactive\"\n ):\n self._state = False\n else:\n self._state = None\n _LOGGER.debug(\"Updated, new state: %s\", self._state)", "async def async_added_to_hass(self):\n self._device.register_update_callback(self.async_schedule_update_ha_state)", "async def _wait_setheist(self, ctx, seconds: int):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n theme = await self.thief.config.guild(guild).Theme()\r\n t_crew = theme[\"Crew\"]\r\n\r\n if seconds > 0:\r\n config[\"Wait\"] = seconds\r\n await self.thief.config.guild(guild).Config.set(config)\r\n time_fmt = 
self.thief.time_format(seconds)\r\n msg = \"Setting {} gather time to {}.\".format(t_crew, time_fmt)\r\n else:\r\n msg = \"Need a number higher than 0.\"\r\n await ctx.send(msg)", "async def refresh_entity_state(self):", "def mark_stale(self) -> None:\n self._state = STATE_NOT_HOME\n self.gps = None\n self.last_update_home = False", "async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n \n if state is not None:\n\n if state.state is not None:\n self._attr_native_value = state.state\n self._attr_state = state.state\n self._hass.data[DOMAIN][get_gas_tariff_override_key(self._serial_number, self._mprn)] = self._attr_native_value\n \n self._attributes = {}\n for x in state.attributes.keys():\n self._attributes[x] = state.attributes[x]\n \n _LOGGER.debug(f'Restored OctopusEnergyPreviousAccumulativeGasCostTariffOverride state: {self._attr_state}')", "async def _async_force_refresh_state(self):\n await self._shade.refresh()\n self._async_update_current_cover_position()\n self.async_write_ha_state()", "def _set_environment(self) -> None:\n last_update_time = time.time()\n while True:\n # The 'math' in the next line keeps the refresh intervals more regular since the update takes time to\n # complete.\n time.sleep(REFRESH_INTERVAL - (time.time() - last_update_time)) # REFRESH_INTERVAL - ELAPSED_TIME\n last_update_time = time.time()\n with self.lock:\n if self.desired_environment:\n self._update_environment(self.desired_environment)", "def test_update_state(self):\n pass", "def timer_update(self):\n if self.mineboard.gamestate is not None:\n return\n time_so_far = round(time.time()-self.start_time)\n if time_so_far == 1:\n self.now.set(f\"Time so far: {time_so_far} second\")\n else:\n self.now.set(f\"Time so far: {time_so_far} seconds\")\n self.after(1000, self.timer_update) # calls this function every second", "def test_update_instances_schedule_state(self):\n pass", "def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def valkkafsmanager_set_time_cb(self, t):\n self.signals.set_time.emit(t)", "def service( self ):\n\n self.alive = time.time()", "def settimeout(self,timeout=10):\r\n # Update\r\n self.timeout = timeout", "def test_swact_fails_when_host_query_fails(self):\n\n # mock the get_host query is empty and raises an exception\n self.sysinv_client.get_host.side_effect = \\\n Exception(\"Unable to find host controller-0\")\n\n # invoke the strategy state operation on the orch thread\n self.worker.perform_state_action(self.strategy_step)\n\n # verify the swact command was never attempted\n self.sysinv_client.swact_host.assert_not_called()\n\n # verify that the state moves to the next state\n self.assert_step_updated(self.strategy_step.subcloud_id,\n consts.STRATEGY_STATE_FAILED)", "async def _async_update_data(self) -> None:\n try:\n if not self.device:\n self.device = await async_wifi_bulb_for_host(self.hass, self.host)\n else:\n await self.hass.async_add_executor_job(self.device.update_state)\n except FLUX_LED_EXCEPTIONS as ex:\n raise UpdateFailed(ex) from ex\n\n if not self.device.raw_state:\n raise UpdateFailed(\"The device failed to update\")", "def _async_update_temperature(self, state):\n try:\n self.ccs811_client.set_temperature(float(state.state))\n except ValueError as 
ex:\n _LOGGER.error(\"Unable to update from sensor: %s\", ex)", "def prepareflow(self):\r\n self.time = 0\r\n #self.timeout = timeout\r\n Dummy = things(0,0,0)\r\n Dummy.threadqueue.append(-1)\r\n Dummy.s=self\r\n Dummy.name = 'End of simulation.'\r\n heappush(self.queue, (self.timeout, (Dummy, 1000)))\r\n #while true:\r\n #(now, (item, i)) = heappop(self.queue)\r\n #if now >= timeout: break\r", "def set_state(self, state_dict: Dict) -> None:\n self._state_waiting_to_be_consumed.update(state_dict)", "async def _timeToRun(self) -> None:\n d: Deferred[None] = Deferred()\n self._waiting.append(d)\n await d", "def after_tick(self, time):\n pass", "async def async_update(self):\n await self.hass.async_add_executor_job(self._client.update)\n if self._client.sensor.sample_ok:\n if self._variable == SENSOR_TEMPERATURE:\n value = round(self._client.sensor.temperature, 1)\n if self.unit_of_measurement == TEMP_FAHRENHEIT:\n value = celsius_to_fahrenheit(value)\n else:\n value = round(self._client.sensor.humidity, 1)\n self._state = value\n else:\n _LOGGER.warning(\"Bad sample\")", "def async_update_stale(self, now: datetime) -> None:\n for device in self.devices.values():\n if (device.track and device.last_update_home) and device.stale(now):\n self.hass.async_create_task(device.async_update_ha_state(True))", "def watchdog_timer(state, wait=3):\n time.sleep(wait)\n if not state['completed']:\n _thread.interrupt_main()" ]
[ "0.6223716", "0.61008537", "0.6037427", "0.5854355", "0.568859", "0.5629717", "0.56151396", "0.5608436", "0.5571809", "0.5568824", "0.55633", "0.5542896", "0.5542896", "0.5508925", "0.549049", "0.5477234", "0.5477234", "0.54645914", "0.54598725", "0.5455025", "0.54405177", "0.5433824", "0.5418674", "0.5408871", "0.5394907", "0.53945714", "0.53926414", "0.5386645", "0.538653", "0.5378444", "0.53720754", "0.53519964", "0.53519964", "0.53494334", "0.5339035", "0.53278816", "0.5327173", "0.53250366", "0.5306546", "0.5306348", "0.53026956", "0.5299985", "0.5293389", "0.528944", "0.52820534", "0.52773446", "0.52755713", "0.5275433", "0.5267509", "0.5260403", "0.52583086", "0.52511835", "0.5250839", "0.5246726", "0.5237781", "0.5234691", "0.5234354", "0.52280843", "0.5227773", "0.52268595", "0.5226035", "0.52233917", "0.5199315", "0.51981366", "0.5192237", "0.5188515", "0.51784235", "0.51697093", "0.51653826", "0.5159667", "0.5156788", "0.51566845", "0.51534045", "0.51520264", "0.5150045", "0.5142729", "0.514217", "0.51413095", "0.5139709", "0.51365906", "0.5135539", "0.512949", "0.5127027", "0.5118361", "0.51138204", "0.5111749", "0.510824", "0.51063246", "0.5102449", "0.50945735", "0.5091535", "0.50874525", "0.5081085", "0.5073666", "0.5070929", "0.50692415", "0.5068765", "0.50679696", "0.5067621", "0.50669295" ]
0.5197365
64
The uri returned from request.uri is not properly urlencoded (sometimes it's partially urldecoded). This is a weird hack to get werkzeug to return the proper urlencoded string uri.
def _get_uri_from_request(request):
    uri = request.base_url
    if request.query_string:
        uri += '?' + request.query_string.decode('utf-8')
    return uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not path:\r\n path = \"/\"\r\n\r\n # Could do syntax based normalization of the URI before\r\n # computing the digest. See Section 6.2.2 of Std 66.\r\n request_uri = query and \"?\".join([path, query]) or path\r\n scheme = scheme.lower()\r\n defrag_uri = scheme + \"://\" + authority + request_uri\r\n\r\n return defrag_uri", "def uri(self):\n parts = []\n # if I have a scheme\n if self.scheme: parts.append('{}:'.format(self.scheme))\n # if I have an authority\n if self.authority: parts.append('//{}'.format(self.authority))\n # if I have an address\n if self.address: parts.append('{}'.format(self.address))\n # if I have a query\n if self.query: parts.append('?{}'.format(self.query))\n # if I have a fragment\n if self.fragment: parts.append('#{}'.format(self.fragment))\n # assemble and return\n return ''.join(parts)", "def _base_uri(self) -> str:\n if self.use_original_uri:\n header_value = self.use_original_uri.get(\"header_value\")\n conditions = self.use_original_uri.get(\"claim_conditions\")\n if conditions.get(\"any\"):\n uri = self.request.headers.get(header_value)\n else:\n key = self.claims.get(conditions.get(\"claim_key\"))\n val = self.claims.get(conditions.get(\"claim_value\"))\n if self.claims.get(key) == val:\n uri = self.request.headers.get(header_value)\n else:\n uri = self.request.uri\n else:\n uri = self.request.uri\n if not uri:\n uri = self.request.uri\n return uri.split(\"?\")[0]", "def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' + qs\n return scheme + host + script_name + path_info + qs", "def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri", "def get_correct_url(request: flask.Request) -> str:\n\n parsed_url = urlparse(request.url_root)\n request_scheme = request.headers.get('X-Scheme')\n if request_scheme is not None:\n # use the same scheme that the request used\n return parsed_url._replace(scheme=request_scheme).geturl()\n elif parsed_url.scheme == \"http\" and \"localhost\" not in parsed_url.netloc:\n # if the request scheme is unknown use https unless we're referring\n # to localhost\n return parsed_url._replace(scheme=\"https\").geturl()\n else:\n # give up and don't make any changes\n return request.url_root", "def __redirect_uri(self):\n uri = '%s://%s%s' % (request.scheme, request.hostname,\n request.path_info)\n if request.get_vars:\n uri += '?' 
+ urlencode(request.get_vars)\n return uri", "def quote_uri(uri):\n import urlparse\n import urllib\n\n up=urlparse.urlparse(uri)\n np=urllib.quote(up[2])\n return urlparse.urlunparse((up[0],up[1],np,up[3],up[4],up[5]))", "def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')", "def test_unicode(self):\n iri = u'http://localhost/expos\\xe9?doppelg\\xe4nger=Bryan O\\u2019Sullivan#r\\xe9sum\\xe9'\n uri = b'http://localhost/expos%C3%A9?doppelg%C3%A4nger=Bryan%20O%E2%80%99Sullivan#r%C3%A9sum%C3%A9'\n self.assertEqual(flatten(url.URL.fromString(iri)), uri)", "def full_url(self):\n return \"%s://%s%s\" % (self.protocol, self.host, self.uri)", "def raw_url(self) -> str:\n return self.url_as(raw=True)", "def uri(self, path):\n path = ensure_slash(path)\n return 'http://127.0.0.1:%d%s' % (self.port, path)", "def __str__(self):\r\n self.query = urllib.urlencode(self.args)\r\n self.query = urllib.unquote(self.query)\r\n return urlparse.urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))", "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "def iri2uri(uri): \r\n if isinstance(uri ,unicode):\r\n (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)\r\n authority = authority.encode('idna')\r\n # For each character in 'ucschar' or 'iprivate'\r\n # 1. encode as utf-8\r\n # 2. then %-encode each octet of that utf-8 \r\n uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))\r\n uri = \"\".join([encode(c) for c in uri])\r\n return uri", "def get_url(self, uri):\n # TODO make this a prepend_if_needed type method\n return urllib.parse.urljoin(self.hostname, uri)", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])", "def get_uri(self):\n if self._uri is None:\n self._uri = \"{0}{1}/{2}\".format(\n self.session.resource_prefix,\n self.base_uri,\n self.ip_or_ifname_or_group_name,\n )\n\n return self._uri", "def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))", "def full_uri(path):\n protocol = 'https' if settings.USE_HTTPS else 'http'\n domain = Site.objects.get_current().domain\n return \"{}://{}{}\".format(protocol, domain, path)", "def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")", "def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')", "def test_unicode_query_string():\n assert (normalize_url(\"http://example.com/?file=résumé.pdf\") ==\n \"http://example.com/?file=r%C3%A9sum%C3%A9.pdf\")", "def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url", "def __str__(self):\n if self._str is None:\n # special cases\n if self == URI.INVALID():\n self._str = \"[invalid]\"\n elif self == URI.EMPTY():\n self._str = \"\"\n elif self == URI.INLINE():\n self._str = \"[inline]\"\n elif self == URI.EVAL():\n self._str = \"[eval]\"\n elif 
not self._isEmpty(self._scheme) and self._isEmpty(self._host) and self._isEmpty(self._port) and self._isEmpty(self._path) and self._isEmpty(self._query):\n self._str = self._scheme + \":\"\n else:\n self._str = \"\"\n if self._scheme in defaults.schemesWithNoDoubleSlash:\n self._str += self._scheme + \":\"\n elif self._scheme is not None:\n self._str += self._scheme + \"://\"\n \n self._str += self._host\n \n if self._port is not None:\n self._str += \":\" + str(self._port)\n \n if self._path is not None:\n self._str += urllib.quote(self._path.encode('utf8')).decode('ascii')\n \n if self._query is not None:\n self._str += \"?\" + self._query\n return self._str", "def uri_string(self):\n if isinstance(self.entity, int):\n uri_string = \"{{{0}}}\".format(self.entity)\n elif isinstance(self.entity, NodePointer):\n uri_string = \"{{{0}}}\".format(self.entity.address)\n else:\n try:\n uri_string = self.entity.ref\n except AttributeError:\n uri_string = ustr(self.entity)\n if self.segments:\n if not uri_string.endswith(\"/\"):\n uri_string += \"/\"\n uri_string += \"/\".join(map(percent_encode, self.segments))\n return uri_string", "def _full_url(url, _params={}):\n\n # Support for unicode domain names and paths.\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\n\n if not scheme:\n raise ValueError(\"Invalid URL %r: No schema supplied\" % url)\n\n netloc = netloc.encode('idna')\n\n if isinstance(path, unicode):\n path = path.encode('utf-8')\n\n path = requote_path(path)\n\n url = str(urlparse.urlunparse([scheme, netloc, path, params, query,\n fragment]))\n\n if _params:\n if urlparse.urlparse(url).query:\n return '%s&%s' % (url, _params)\n else:\n return '%s?%s' % (url, _params)\n else:\n return url", "def getQualifiedURL(uri = None):\n schema, stdport = ('http', '80')\n host = os.environ.get('HTTP_HOST')\n if not host:\n host = os.environ.get('SERVER_NAME')\n port = os.environ.get('SERVER_PORT', '80')\n if port != stdport: host = host + \":\" + port\n result = \"%s://%s\" % (schema, host)\n if uri: result = result + uri\n return result", "def EndpointURI(self):\n return '/'.join(str(x) for x in [self.base_endpoint,self.match,self.resource] if x)", "def get_request_uri(request):\n # DEV: Use django.http.request.HttpRequest._get_raw_host() when available\n # otherwise back-off to PEP 333 as done in django 1.8.x\n if hasattr(request, \"_get_raw_host\"):\n host = request._get_raw_host()\n else:\n try:\n # Try to build host how Django would have\n # https://github.com/django/django/blob/e8d0d2a5efc8012dcc8bf1809dec065ebde64c81/django/http/request.py#L85-L102\n if \"HTTP_HOST\" in request.META:\n host = request.META[\"HTTP_HOST\"]\n else:\n host = request.META[\"SERVER_NAME\"]\n port = str(request.META[\"SERVER_PORT\"])\n if port != (\"443\" if request.is_secure() else \"80\"):\n host = \"{0}:{1}\".format(host, port)\n except Exception:\n # This really shouldn't ever happen, but lets guard here just in case\n log.debug(\"Failed to build Django request host\", exc_info=True)\n host = \"unknown\"\n\n # Build request url from the information available\n # DEV: We are explicitly omitting query strings since they may contain sensitive information\n return parse.urlunparse(\n parse.ParseResult(scheme=request.scheme, netloc=host, path=request.path, params=\"\", query=\"\", fragment=\"\",)\n )", "def normalize_base_string_uri(uri, host=None):\n uri = to_unicode(uri)\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)\n\n # The scheme, authority, and path of the 
request resource URI `RFC3986`\n # are included by constructing an \"http\" or \"https\" URI representing\n # the request resource (without the query or fragment) as follows:\n #\n # .. _`RFC3986`: https://tools.ietf.org/html/rfc3986\n\n if not scheme or not netloc:\n raise ValueError('uri must include a scheme and netloc')\n\n # Per `RFC 2616 section 5.1.2`_:\n #\n # Note that the absolute path cannot be empty; if none is present in\n # the original URI, it MUST be given as \"/\" (the server root).\n #\n # .. _`RFC 2616 section 5.1.2`: https://tools.ietf.org/html/rfc2616#section-5.1.2\n if not path:\n path = '/'\n\n # 1. The scheme and host MUST be in lowercase.\n scheme = scheme.lower()\n netloc = netloc.lower()\n\n # 2. The host and port values MUST match the content of the HTTP\n # request \"Host\" header field.\n if host is not None:\n netloc = host.lower()\n\n # 3. The port MUST be included if it is not the default port for the\n # scheme, and MUST be excluded if it is the default. Specifically,\n # the port MUST be excluded when making an HTTP request `RFC2616`_\n # to port 80 or when making an HTTPS request `RFC2818`_ to port 443.\n # All other non-default port numbers MUST be included.\n #\n # .. _`RFC2616`: https://tools.ietf.org/html/rfc2616\n # .. _`RFC2818`: https://tools.ietf.org/html/rfc2818\n default_ports = (\n ('http', '80'),\n ('https', '443'),\n )\n if ':' in netloc:\n host, port = netloc.split(':', 1)\n if (scheme, port) in default_ports:\n netloc = host\n\n return urlparse.urlunparse((scheme, netloc, path, params, '', ''))", "def test_path_percent_encoding():\n assert (normalize_url(\"http://example.com/hello world{}\") ==\n \"http://example.com/hello%20world%7B%7D\")", "def base_url(self, code: str, uri: str, protocol=None) -> str:\n protocol, host = self._hostname(code, protocol)\n if protocol == 'https':\n uri = self.ssl_pathprefix(code) + uri\n return urlparse.urljoin(f'{protocol}://{host}', uri)", "def url(self):\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''", "def get_uri(self):\r\n return self.uri", "def _proper_url(self, url):\n if self.base_url not in url:\n url = self.base_url + url\n url = re.sub(r'(?<!https:)//', '/', url)\n if not url.endswith('/') and '?' 
not in url:\n url = url + '/'\n if url.endswith('?'):\n url = url[:-1]\n return url", "def full_url(resource):\r\n # if (url/resource == '127.0.0.1':)\r\n if resource == '/' or resource == ' ':\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, URL_TEST)\r\n # else (if url/resource == 'Specific resource')\r\n else:\r\n url = \"{0}{1}\".format(ROOT_DIRECTORY, str(resource).replace('/', '\\\\'))\r\n print(f'the client request = {url}')\r\n return url", "def _get_url(self, absolute):", "def renderHTTP(self, ctx):\n return url.URL.fromContext(ctx).child('')", "def _build_uri(self, uri_base, params):\n if not params:\n return uri_base\n else:\n uri_extension = \"?\"\n for param in params:\n uri_extension = uri_extension + param + \"&\"\n uri_extension = uri_extension[:-1] # clip off the final & \n uri = uri_base + uri_extension\n return uri", "def test_percent_encode_querystring():\n assert (normalize_url(\"http://example.com/?a=hello{}\") ==\n \"http://example.com/?a=hello%7B%7D\")", "def test_dont_percent_encode_safe_chars_query():\n assert (normalize_url(\"http://example.com/a/?face=(-.-)\") ==\n \"http://example.com/a?face=(-.-)\")", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def uri(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"uri\")", "def uri(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"uri\")", "def uri(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"uri\")", "def unquote(uri):\r\n uri = uri.encode('ascii')\r\n unquoted = urllib_unquote(uri)\r\n return unquoted.decode('utf-8')", "def _uri(helper):\n return '/'.join((\n helper.context_meta['server_uri'],\n 'servicesNS',\n 'nobody',\n 'Splunk_TA_paloalto',\n 'storage',\n 'collections',\n 'data',\n 'minemeldfeeds'))", "def full(self):\n url = (self.scheme + ':') if self.scheme else ''\n url += '//' + self.netloc + self.relative()\n return url", "def get_request_url(environ, use_x_forwarded_for=False):\n # type: (Dict[str, str], bool) -> str\n return \"%s://%s/%s\" % (\n environ.get(\"wsgi.url_scheme\"),\n get_host(environ, use_x_forwarded_for),\n wsgi_decoding_dance(environ.get(\"PATH_INFO\") or \"\").lstrip(\"/\"),\n )", "def get_full_url(request_handler, path):\n pr = urlparse(request_handler.request.url)\n return '%s://%s%s' % (pr.scheme, pr.netloc, path)", "def uri(base, *path, **query):\n if base and base.endswith('/'):\n base = base[:-1]\n retval = [base]\n\n # build the path\n path = '/'.join([''] +\n [unicode_quote(s.strip('/')) for s in path\n if s is not None])\n if path:\n retval.append(path)\n\n # build the query string\n params = []\n for name, value in query.items():\n if type(value) in (list, tuple):\n params.extend([(name, i) for i in value if i is not None])\n elif value is not None:\n if value is True:\n value = 'true'\n elif value is False:\n value = 'false'\n params.append((name, value))\n if params:\n retval.extend(['?', unicode_urlencode(params)])\n\n return ''.join(retval)", "def get_return_to_from_path(req: web.Request) -> str:\n if req.path == \"/login\":\n return urllib.parse.quote(\"/\", safe=\"\")\n\n return urllib.parse.quote(req.path, safe=\"\")", "def uri(self) -> str:\n return self._uri", "def _make_url(self, path):\n if not self.base_location:\n raise ValueError(\"No base_location set. 
Cannot construct url.\")\n\n if path:\n path = self._normalise_last_slashes(path)\n path = self._normalise_head_slashes(path)\n\n return \"\".join((self.base_location, self.endpoint, path))", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def toString(self):\n self.query = {}\n for i in self.arguments:\n self.query[i] = self.arguments[i]\n\n self.query = urlencode(self.query)\n\n return urlparse.urlunsplit((self.scheme, self.netloc,\n self.path, self.query,self.fragment))", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def uri(self):\n return self._uri", "def normalize_uri(uri):\n return normalize_uri_result(uri).unsplit()", "def encode_sm_uri(uri): # TODO is a shit\n\n encoded = uri\n for pat, replace in replaces.items():\n encoded = encoded.replace(pat, replace)\n return encoded", "def test_additional_query_args():\n assert (normalize_url(\"http://example.com?c=d\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b&c=d\")\n assert (normalize_url(\"http://example.com\", [(\"a\", \"b\")]) ==\n \"http://example.com/?a=b\")\n assert (normalize_url(\"http://example.com\", [(\"résumé\", \"résumé\")]) ==\n \"http://example.com/?r%C3%A9sum%C3%A9=r%C3%A9sum%C3%A9\")", "def build_uri(\n url: str, query_params: Optional[Dict] = None, fragment: Optional[Dict] = None\n) -> str:\n if query_params is None:\n query_params = {}\n\n if fragment is None:\n fragment = {}\n\n parsed_url = urlparse(url)\n uri = urlunsplit(\n (\n parsed_url.scheme,\n parsed_url.netloc,\n parsed_url.path,\n urlencode(query_params, quote_via=quote), # type: ignore\n urlencode(fragment, quote_via=quote), # type: ignore\n )\n )\n return uri", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def generate_uri(uri):\n return uri[:-5] + uuid.uuid4().hex", "def get_uri(self):\n return self.url", "def test_unicode_path():\n assert (normalize_url(\"http://example.com/résumé\") ==\n \"http://example.com/r%C3%A9sum%C3%A9\")", "def _override_tourl(self):\n base_url = urlparse.urlparse(self.url)\n try:\n query = base_url.query\n except AttributeError:\n # must be python <2.5\n query = base_url[4]\n query = parse_qs(query)\n for k, v in self.items():\n query.setdefault(k, []).append(v)\n\n try:\n scheme = base_url.scheme\n netloc = base_url.netloc\n path = base_url.path\n params = base_url.params\n fragment = base_url.fragment\n except AttributeError:\n # must be python <2.5\n scheme = base_url[0]\n netloc = base_url[1]\n path = base_url[2]\n params = base_url[3]\n fragment = base_url[5]\n\n url = (scheme, netloc, path, params,\n urllib.urlencode(query, True), fragment)\n return urlparse.urlunparse(url)", "def Url(self) -> str:", "def url(self, path=\"/\"):\n return \"http://127.0.0.1:%d/%s\" % (\n self.server_address[1],\n path.lstrip(\"/\"),\n )", "def test_normalize_percent_encoding_in_querystring():\n assert (normalize_url(\"http://example.com/?a=b%c2\") ==\n \"http://example.com/?a=b%C2\")", "def _make_url(self):\n ...", "def GetURL(self, rel_url):\n return 
'http://localhost:%d/%s' % (self.port, rel_url)", "def encoded_query_str(request):\n return updated_query_str(request)", "def urlify(board):\n return(board.replace(\" \",\"%20\"))", "def url():\n ...", "def _absurl(fragment):\r\n root = settings.MEDIA_URL\r\n root += root[-1:] != '/' and '/' or ''\r\n return urlparse.urljoin(root, fragment)", "def get_uri(self):\n return self.__uri", "def get_query_string(self):\r\n pass", "def _uri(self):\n raise NotImplementedError", "def geturl(environ, query=True, path=True, use_server_name=False):\n url = [f\"{environ['wsgi.url_scheme']}://\"]\n if use_server_name:\n url.append(environ[\"SERVER_NAME\"])\n if environ[\"wsgi.url_scheme\"] == \"https\":\n if environ[\"SERVER_PORT\"] != \"443\":\n url.append(f\":{environ['SERVER_PORT']}\")\n else:\n if environ[\"SERVER_PORT\"] != \"80\":\n url.append(f\":{environ['SERVER_PORT']}\")\n else:\n url.append(environ[\"HTTP_HOST\"])\n if path:\n url.append(getpath(environ))\n if query and environ.get(\"QUERY_STRING\"):\n url.append(f\"?{environ['QUERY_STRING']}\")\n return \"\".join(url)", "def friendly_uri(environ, start_response):\n _setup_friendly_environ(environ)\n return get_tiddler(environ, start_response)", "def validate_uri(value: Any) -> str:\n uri_value = str(value)\n\n if urlparse(uri_value).scheme == \"tcp\":\n # pylint: disable-next=no-value-for-parameter\n return cast(str, vol.Schema(vol.Url())(uri_value))\n\n raise vol.Invalid(\"invalid Wyoming Protocol URI\")", "def test_unicode():\n create_request(\"/\")\n unicodestring = (u'\\N{LATIN SMALL LETTER A WITH GRAVE}'\n u'\\N{LATIN SMALL LETTER E WITH GRAVE}'\n u'\\N{LATIN SMALL LETTER I WITH GRAVE}'\n u'\\N{LATIN SMALL LETTER O WITH GRAVE}'\n u'\\N{LATIN SMALL LETTER U WITH GRAVE}')\n print url(unicodestring)\n eq_(url('/', x=unicodestring),\n '/?x=%C3%A0%C3%A8%C3%AC%C3%B2%C3%B9'\n )", "def encode_url(self, url):\n # turn string into unicode\n if not isinstance(url, unicode):\n url = url.decode('utf8')\n\n # parse it\n parsed = urlsplit(url)\n\n # divide the netloc further\n netloc_pattern = re.compile(r\"\"\"\n (?:(?P<user>[^:@]+)(?::(?P<password>[^:@]+))?@)?\n (?P<host>[^:]+)\n (?::(?P<port>[0-9]+))?\n \"\"\", re.X | re.U)\n netloc_parsed = netloc_pattern.match(parsed.netloc).groupdict()\n\n # encode each component\n scheme = parsed.scheme\n user = netloc_parsed['user'] and quote(netloc_parsed['user'])\n password = (netloc_parsed['password'] and\n quote(netloc_parsed['password']))\n host = netloc_parsed['host']\n port = netloc_parsed['port'] and netloc_parsed['port']\n path = '/'.join( # could be encoded slashes!\n quote(unquote(pce).encode('utf8'), '')\n for pce in parsed.path.split('/')\n )\n query = quote(unquote(parsed.query), '=&?/')\n fragment = quote(unquote(parsed.fragment))\n\n # put it back together\n netloc = ''\n if user:\n netloc += user\n if password:\n netloc += ':' + password\n netloc += '@'\n netloc += host\n if port:\n netloc += ':'+port\n return urlunsplit((scheme, netloc, path, query, fragment))", "def normalize_url(url):\n # print(url)\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'https://{}/{}'.format(zone_name, url.replace('//', '/'))\n return url", "def base_uri(relative_path=''):\n base_path = get_app_root()\n if not os.path.exists(base_path):\n raise ValueError('Path %s does not exist' % base_path)\n\n return 'file://%s' % os.path.join(base_path, relative_path)", "def compile_route_to_url(self):\n\n if 'http' in self.redirect_url:\n return self.redirect_url\n\n # Split the url into a 
list\n split_url = self.redirect_url.split('/')\n\n # Start beginning of the new compiled url\n compiled_url = '/'\n\n # Iterate over the list\n for url in split_url:\n\n # if the url contains a parameter variable like @id:int\n if '@' in url:\n url = url.replace('@', '').replace(\n ':int', '').replace(':string', '')\n compiled_url += str(self.param(url)) + '/'\n else:\n compiled_url += url + '/'\n\n # The loop isn't perfect and may have an unwanted trailing slash\n if compiled_url.endswith('/') and not self.redirect_url.endswith('/'):\n compiled_url = compiled_url[:-1]\n\n # The loop isn't perfect and may have 2 slashes next to eachother\n if '//' in compiled_url:\n compiled_url = compiled_url.replace('//', '/')\n\n return compiled_url", "def shorten_url():\n return rh.shorten_url(request)", "def test_unreserved_percentencoding():\n assert (normalize_url(\"http://www.example.com/%7Eusername/\") ==\n \"http://www.example.com/~username\")\n assert (normalize_url('http://example.com/foo%23bar') ==\n 'http://example.com/foo%23bar')\n assert (normalize_url('http://example.com/foo%2fbar') ==\n 'http://example.com/foo%2Fbar')\n assert (normalize_url('http://example.com/foo%3fbar') ==\n 'http://example.com/foo%3Fbar')", "def urlpath(self, url):\n\t\t# remove schema + hostname\n\t\turl = re.sub('^[^:]*://[^/]+', '/', url)\n\n\t\treturn self.canonicalize(url)", "def uri(self) -> Optional[str]: # noqa: D401\n return self._uri", "def get_url(\n self,\n *,\n context: Context,\n ) -> str:\n request = context['request']\n\n # We want to use a relative URL in the diff viewer as we will not be\n # re-rendering the page when switching between revisions.\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n\n return local_site_reverse(\n 'raw-diff',\n request,\n kwargs={\n 'review_request_id': context['review_request'].display_id,\n })", "def test_getsafeurl_authed_having_port():\n url = 'https://user:[email protected]:8080/project/objects.inv'\n expected = 'https://[email protected]:8080/project/objects.inv'\n actual = _get_safe_url(url)\n assert expected == actual" ]
[ "0.67314285", "0.65002495", "0.6443756", "0.6264452", "0.6258882", "0.6235578", "0.6230169", "0.6225036", "0.61642367", "0.6159104", "0.61166435", "0.6091703", "0.60887986", "0.6040494", "0.602881", "0.6023126", "0.60162497", "0.6009078", "0.6009078", "0.5996785", "0.5991015", "0.5989411", "0.59613985", "0.5939465", "0.59283805", "0.59275925", "0.59133846", "0.5911823", "0.59070665", "0.58872634", "0.588619", "0.58833", "0.5868059", "0.58529943", "0.58489054", "0.5834858", "0.58310133", "0.58239186", "0.57979894", "0.5749654", "0.57495946", "0.574129", "0.57203424", "0.5715809", "0.57134277", "0.56956697", "0.56956697", "0.56956697", "0.5691761", "0.5688912", "0.5687137", "0.567435", "0.56704", "0.5665536", "0.56561255", "0.5647089", "0.56368876", "0.5634208", "0.56316674", "0.56232107", "0.56232107", "0.56232107", "0.56232107", "0.56232107", "0.56232107", "0.5619134", "0.56163013", "0.5614214", "0.56092155", "0.5606191", "0.560316", "0.5598135", "0.559322", "0.5577067", "0.55699605", "0.55627066", "0.5553486", "0.55418426", "0.55394757", "0.55340195", "0.553165", "0.5520892", "0.5517255", "0.55143476", "0.55073506", "0.55053824", "0.5499315", "0.5494427", "0.5464951", "0.5463112", "0.54576725", "0.5457476", "0.5455087", "0.5454849", "0.54469883", "0.54421633", "0.5437285", "0.5436216", "0.54323775", "0.5428669" ]
0.68531525
0
Defines, parses, checks, and returns command line arguments
def parse_arguments():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--adj', help=""".adj file regulon""",
                        type=argparse.FileType(mode='r'), required=True)
    parser.add_argument('--expr_genes',
                        help="""list of gene IDs in expression matrix (first column of expr matrix)""",
                        type=argparse.FileType(mode='r'), required=True)
    # parser.add_argument('--cutoff_percent', help="""remove entire row (regulator plus genes)
    # if percent (out of 100) of genes remaining is below this value AND
    # genes remaining is below
    # the cutoff_number argument""", type=int, required=False, default=30)
    parser.add_argument('--cutoff_number',
                        help=""""remove entire row (regulator plus regulon genes)
                        if number of genes remaining is below this value, defaults to 25""",
                        type=int, required=False, default=25)
    args = parser.parse_args()
    return args
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 5:\n print(\"[ERR] Invalid number of command line arguments!\")\n _usage()\n sys.exit(1)\n\n # Get path to config file\n configfile = sys.argv[1]\n if not os.path.exists(configfile):\n print(f\"[ERR] Config file {configfile} does not exist!\")\n sys.exit(1)\n\n # Get top directory of LIS data\n topdatadir = sys.argv[2]\n if not os.path.exists(topdatadir):\n print(f\"[ERR] LIS data directory {topdatadir} does not exist!\")\n sys.exit(1)\n\n # Get valid year and month\n yyyymm = sys.argv[3]\n if len(yyyymm) != 6:\n print(\"[ERR] Invalid length of YYYYMM, must be 6 characters!\")\n sys.exit(1)\n year = int(yyyymm[0:4])\n month = int(yyyymm[4:6])\n try:\n startdate = datetime.datetime(year, month, day=1)\n except ValueError:\n print(\"[ERR] Invalid YYYYMM passed to script!\")\n sys.exit(1)\n\n # Get model forcing ID\n model_forcing = sys.argv[4]\n\n return configfile, topdatadir, startdate, model_forcing", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Sets up package within the pheeno's directory.\"\n )\n\n # Required arguments\n parser.add_argument(\"-x\", \"--execute\", action=\"execute\", required=True,\n help=\"something\", default=False)\n\n # Optional arguments\n parser.add_argument(\"-s\", \"--save\", action=\"store\", required=False,\n help=\"something\", default=False)", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 
'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def get_args():\n parser = argparse.ArgumentParser(description=\"Arguments for data exploration\")\n parser.add_argument(\"--tokenize\",\n dest=\"tokenize\",\n action=\"store_true\",\n help=\"Tokenize by words and sentences, counting averages/sd for each.\")\n return parser", "def getArgs():\n ProgDesc = (\"Creates a route type csv file of the type whose contents can be \"\n \"copied and pasted in to EMIT to change the proportions of \"\n \"different vehicle categories with a particular route type.\")\n ANPRDesc = (\"The ANPR file should be a csv file created using fleetSplitFromANPR.\")\n parser = argparse.ArgumentParser(description=ProgDesc)\n parser.add_argument('anprfile', type=str,\n help=\"The ANPR file to be processed. \"+ANPRDesc)\n parser.add_argument('basefile', type=str,\n help=(\"A file containing the base route type proportions. 
\"\n \"This should be created by clicking 'copy' on the \"\n \"route type window of EMIT, pasteing the results in \"\n \"to a spreadsheet, and saving as a csv file.\"))\n parser.add_argument('--saveloc', metavar='save location',\n type=str, nargs='?', default='Auto',\n help=\"Path where the outpt csv file should be saved.\")\n\n\n args = parser.parse_args()\n return args", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser(\n description=\"Convert dependency files into list of GitHub links.\",\n epilog=\"For help with this program, contact John Speed at [email protected].\",\n )\n parser.add_argument(\n \"--python\",\n default=False, # default value is False\n help=\"Convert requirements.txt file into GitHub links.\",\n )\n return parser.parse_args()", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_command_line_arguments():\n\n description, epilog = __doc__.split(\"\\n\\n\", 1)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=description,\n epilog=epilog)\n\n parser.add_argument('-s', '--s', dest='s', action='store', type=float, required=True,\n help='Minimum frequency')\n parser.add_argument('-c', '--credentials', dest='credentials', action='store',\n default=\"./.tpass\",\n help='File with Twitter credentials (username and password, separated by a space)')\n\n args = parser.parse_args()\n \n return args", "def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', '--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose 
action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def get_args():\n use = (\"%prog is used to run user-specified actions against files. It is \"\n \"typically scheduled to run periodically, and uses a config file for \"\n \"criteria. \\n\"\n \" \\n\"\n \" %prog [file] [misc options]\")\n\n parser = argparse.ArgumentParser(description=use,\n formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--config-name',\n dest='config_name',\n default=None,\n help='Specifies the name of the config file in the standard xdg datagristle config')\n parser.add_argument('-c', '--config-file',\n dest='config_file',\n default=None,\n help='Specifies the name of the config file in a user-specified directory')\n parser.add_argument('--log-dir',\n dest='log_dir',\n default=None,\n help='Overrides the xdg-default logging directory. XDG default on linux is $HOME/.cache/datagristle/gristle_process')\n parser.add_argument('--log-level',\n dest='log_level',\n choices=['debug', 'info', 'warning', 'error', 'critical'],\n help='Specifies level of detail in logs. Default is None - which leaves it to the config file.')\n\n parser.add_argument('--testrun',\n default=False,\n action='store_true',\n dest='test_run',\n help=('Identify files to act upon - but just list them'))\n\n parser.add_argument('--long-help',\n default=False,\n action='store_true',\n help='Print more verbose help')\n\n args = parser.parse_args()\n\n if args.long_help:\n print(__doc__)\n sys.exit(0)\n\n\n return args", "def checkArguments ( ) :\r\n\r\n if len( sys.argv ) <= 1 : return None\r\n\r\n\r\n # splits the arguments that contain quotes\r\n \r\n wordList = [ ]\r\n\r\n for argument in sys.argv :\r\n\r\n wordList.extend( argument.split( '\"' ) )\r\n\r\n\r\n # places all the arguments that start with \"--\" at the end, and joins the others into words\r\n\r\n noMinusList = [ ]\r\n\r\n minusList = [ ]\r\n\r\n argument = \"\"\r\n\r\n for word in wordList[ 1 : ] :\r\n\r\n # strips spaces and quotes\r\n \r\n word = word.strip( \" \\\"'\" ) \r\n\r\n if word.startswith( \"--\" ) :\r\n\r\n minusList.append( word )\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n argument = \"\"\r\n\r\n elif argument == \"\" :\r\n\r\n argument = word\r\n\r\n else :\r\n\r\n argument = argument + \" \" + word\r\n\r\n if len( argument ) > 0 : noMinusList.append( argument )\r\n\r\n\r\n # library = 1st argument of the form \"-- ... /\" that exists\r\n\r\n libraryPath = None\r\n\r\n for argument in minusList :\r\n\r\n if ( ( argument.endswith( os.sep ) ) and ( os.path.exists( argument.strip( \"- \" ) ) ) ) :\r\n\r\n libraryPath = argument.strip( \"-\" )\r\n\r\n break\r\n\r\n # recomposes the command line\r\n \r\n sys.argv = wordList[ : 1 ] + noMinusList + minusList \r\n\r\n return libraryPath", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. 
default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description='Eval')\n parser.add_argument(\n '--cfg', help='experiment configure file path', type=str, \\\n default=\"validation.config.Config\")\n return parser.parse_args()", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def get_args():\n # parse command line args\n parser = argparse.ArgumentParser(\n description=HEADER,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '-c',\n '--config-file',\n default=os.path.join(os.path.dirname(__file__), 'config-user.yml'),\n help='Configuration file')\n parser.add_argument(\n '-r',\n '--recipe-files',\n type=str,\n nargs='+',\n help='Recipe files (list or single file)')\n parser.add_argument(\n '-d',\n '--main-dir',\n default=os.path.join(os.environ['HOME'], 'ESMVALTOOL_ROSE'),\n help='Main analysis directory; default to $HOME/ESMVALTOOL_ROSE')\n parser.add_argument(\n '-s',\n '--suite-dir',\n default=os.path.join(os.environ['HOME'], 'u-bd684'),\n help='u-bd684 suite directory; default to $HOME/u-bd684')\n parser.add_argument(\n '-n',\n '--no-submit',\n action='store_true',\n help=\"Flag to NOT submit the Rose suite.\")\n parser.add_argument(\n '-l',\n '--log-level',\n default='info',\n choices=['debug', 'info', 'warning', 'error'])\n args = parser.parse_args()\n return args", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\",\n help=\"text file, one compound per line\", required=True)\n parser.add_argument(\"-j\", \"--jsononto\",\n help=\"json ontology of families\", required=True)\n parser.add_argument(\"-o\", \"--output\",\n help=\"output file\", required=True)\n parser.add_argument(\"-e\", \"--encoded\",\n help=\"identifiers are encoded\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n run_analysis(args.input, args.jsononto, args.output, args.encoded)", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def get_cli_arguments(self):\n pass", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def configure_commandline(cmdline_arguments: argparse.Namespace) -> Optional[Text]:", "def _ParseCommandArguments():\n arg_parser = argparse.ArgumentParser()\n arg_parser.usage = __doc__\n\n arg_parser.add_argument('--download-dir',\n type=str,\n required=True,\n 
help='Directory into which corpora are downloaded.')\n arg_parser.add_argument('--build-dir',\n required=True,\n type=str,\n help='Directory where fuzzers were built.')\n args = arg_parser.parse_args()\n return args", "def parseInputArgs():\n parser = argparse.ArgumentParser(description=\"Unix cut analog\", usage='%(prog)s [arguments]')\n\n # pos arg\n parser.add_argument('filename', type=str, help='input file name')\n\n # req arg\n requiredNamed = parser.add_argument_group('required arguments')\n requiredNamed.add_argument('-f', '--fields', type=str, help='list of fields, separated by comma', required=True)\n # optional args\n parser.add_argument('-s', '--separator', type=str, default='\\t', help='column separator, default tab')\n\n args = parser.parse_args()\n return args", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Extract a sample of the data.\")\n\n parser.add_argument(\"userid\", help=\"User ID\")\n parser.add_argument(\"start\", help=\"Start\")\n parser.add_argument(\"end\", help=\"End\")\n parser.add_argument(\"source\", help=\"Source data file\")\n parser.add_argument(\"dest\", help=\"Destination file\")\n\n return parser.parse_args()", "def parseArgs():\n # Configure the option parser for CLI options to the script\n usage = \"usage: %prog [options] userName password configlet xlfile\"\n parser = argparse.ArgumentParser(description=\"Excel File to JSON Configlet Builder\")\n parser.add_argument(\"--userName\", help='Username to log into CVP')\n parser.add_argument(\"--password\", help='Password for CVP user to login')\n parser.add_argument(\"--target\", nargs=\"*\", metavar='TARGET', default=[],\n help='List of CVP appliances to get snapshot from URL,URL')\n parser.add_argument(\"--snapshot\", help='CVP Snapshot containing Show Inventory and Show LLDP neighbor data')\n parser.add_argument(\"--opticType\", default='PSM4', help=\"Optic Type to look for\")\n parser.add_argument(\"--verbose\", default=False, help='Return more information to the command line')\n args = parser.parse_args()\n return checkArgs( args )", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Given an input LASTZ file, output a BED-formatted file of locus coordinates\"\"\"\n )\n parser.add_argument(\n \"--alignments\",\n required=True,\n type=is_dir,\n action=FullPaths,\n help=\"\"\"The input directory containing LASTZ files\"\"\",\n )\n parser.add_argument(\n \"--output\",\n required=True,\n action=CreateDir,\n help=\"\"\"The output directory to hold BED-formatted files\"\"\",\n )\n parser.add_argument(\n \"--regex\",\n type=str,\n default=\"^(uce-\\d+)(?:_p\\d+.*)\",\n help=\"\"\"A regular expression to apply to the probe sequences for replacement\"\"\",\n )\n parser.add_argument(\n \"--verbosity\",\n type=str,\n choices=[\"INFO\", \"WARN\", 
\"CRITICAL\"],\n default=\"INFO\",\n help=\"\"\"The logging level to use.\"\"\",\n )\n parser.add_argument(\n \"--log-path\",\n action=FullPaths,\n type=is_dir,\n default=None,\n help=\"\"\"The path to a directory to hold logs.\"\"\",\n )\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def _parse_command_line(self):\n DESCRIPTION = (\n \"Application for searching PyLith .cfg parameter files.\"\n )\n\n parser = argparse.ArgumentParser(description=DESCRIPTION,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"--path\", action=\"store\",\n dest=\"searchpath\", default=\".\", help=\"Search path for .cfg files.\")\n parser.add_argument(\"--display\", action=\"store\",\n dest=\"display\", default=\"all\", help=\"List of metadata to display in search results.\")\n parser.add_argument(\"--verbose\", action=\"store_true\", dest=\"verbose\",\n help=\"Report missing metadata.\")\n\n parser.add_argument(\"--keywords\", action=\"store\", dest=\"keywords\",\n help=\"Comma delimited list of keywords for filtering search results.\")\n parser.add_argument(\"--features\", action=\"store\", dest=\"features\",\n help=\"Comma delimited list of features for filtering search results.\")\n parser.add_argument(\"--authors\", action=\"store\", dest=\"authors\",\n help=\"Comma delimited list of authors for filtering search results.\")\n parser.add_argument(\"--version\", action=\"store\", dest=\"version\",\n help=\"PyLith version for filtering search results.\")\n parser.add_argument(\"--incompatible\", action=\"store_true\", dest=\"incompatible\",\n help=\"Filter search results to show incompatible parameter files.\")\n parser.add_argument(\"--output-format\", action=\"store\", dest=\"output_format\", \n help=\"Output format\", default=\"txt\", choices=[\"text\", \"markdown\"])\n\n args = parser.parse_args()\n\n return args", "def arguments():\n\tparser = argparse.ArgumentParser(description=\"Integrate all columns of a data file. Time is in column 0.\")\n\tparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print debug info.\")\n\tparser.add_argument(\"-k\", \"--kwh\", action=\"store_true\", dest=\"kwh\", default=False, help=\"output in kWh (instead of Ws)\")\n\tparser.add_argument(\"-f\", \"--file\", action=\"store\", dest=\"filename\", help=\"Path to file to read. Defaults to STDIN.\")\n\tparser.add_argument(\"-s\", \"--separator\", dest=\"separator\", default=\",\", help=\"Specify the separation character. 
Defaults to comma (,).\")\n\n\treturn parser.parse_args()", "def getArgs():\r\n parser = argparse.ArgumentParser(\r\n description = \"\"\"This program uses the validation data and a given model to do brain segmentation that will be sent to FeTs challenge to get evaluated \"\"\")\r\n parser.add_argument(\"-d\", type = str, help = \"d is the path to validation dataset, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/\")\r\n parser.add_argument(\"-m\", type = str, help = \"m is the path for the model to load, e.g: C:/Documents/MICCAI_FeTS2021_TrainingData/cpt/cpt_0_1\")\r\n parser.add_argument(\"-o\", type = str, help = \"o is the output path, e.g: C:/Documents/inferences\")\r\n # Get your arguments\r\n return parser.parse_args()", "def getargs(ver='%prog 0.0'):\n parser = argparse.ArgumentParser(\n description=open(__file__).read().split(\"'''\")[1],\n formatter_class=argparse.RawDescriptionHelpFormatter) \n #@todo: OptionParser is depreciated in Python 3.2. \n #Need to move to the new style of parser. \n parser.add_argument(\"--fasta_a_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--fasta_b_name\", \n required=True,\n help = \"REQUIRED: The name of the fasta file a.\")\n parser.add_argument(\"--set_operation\", \n default=\"U\",\n help = \"The operation you want to do. \")\n \n args = parser.parse_args()\n \n fasta_a_name = args.fasta_a_name\n fasta_b_name = args.fasta_b_name\n set_operation = args.set_operation\n \n return fasta_a_name,fasta_b_name,set_operation", "def _read_cmd_args():\n\n # Check if argument count is correct.\n if len(sys.argv) != 4:\n print(\"[ERR] Invalid number of command line arguments!\")\n print(len(sys.argv))\n print(sys.argv[:])\n _usage()\n sys.exit(1)\n\n # Check if lis.config template exists.\n lis_config_template = sys.argv[1]\n if not os.path.exists(lis_config_template):\n print(f\"[ERR] {lis_config_template} does not exist!\")\n sys.exit(1)\n\n # Check if directory for restart files exists. 
Actual restart file\n # shall be checked later.\n restart_dir = sys.argv[2]\n if not os.path.exists(restart_dir):\n print(f\"[ERR] Directory {restart_dir} does not exist!\")\n sys.exit(1)\n\n # Get start date of new LIS run.\n yyyymmdd = sys.argv[3]\n if len(yyyymmdd) != 8:\n print(\"[ERR] Invalid length for YYYYMMDD, must be 8 characters!\")\n sys.exit(1)\n year = int(yyyymmdd[0:4])\n month = int(yyyymmdd[4:6])\n day = int(yyyymmdd[6:8])\n try:\n startdate = datetime.date(year, month, day)\n except ValueError:\n print(\"[ERR] Invalid YYYYMMDD passed to script!\")\n sys.exit(1)\n\n return lis_config_template, restart_dir, startdate", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def get_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n\n arg('--raw_source_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/source_data'))\n arg('--meta_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir'))\n arg('--img_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/input_data'))\n arg('--output_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/output_data'))\n\n arg('--img_partition_option', default='zoom_1_256_256')\n\n input_args = parser.parse_known_args()[0]\n\n return input_args", "def get_args():\n\n parser = argparse.ArgumentParser(\n description=\"Script tests the HCSR04 sensor under different configurations\"\n )\n\n parser.add_argument(\n \"-t\",\n \"--trig\",\n type=int,\n help=\"Trig Pin (Required - must be an integer, must \\\n use BCM pin values)\",\n required=True,\n )\n\n parser.add_argument(\n \"-e\",\n \"--echo\",\n type=int,\n help=\"Echo Pin (Required - must be an integer, must \\\n use BCM pin values)\",\n required=True,\n )\n\n parser.add_argument(\n \"-sp\",\n \"--speed\",\n type=float,\n help=\"Time between individual reading samples \\\n (Optional - must be a float, default\\\n is 0.1 seconds)\",\n required=False,\n default=0.1,\n )\n\n parser.add_argument(\n \"-ss\",\n \"--samples\",\n type=int,\n help=\"Reading Sample Size (Optional - must be an \\\n integer, default is 11)\",\n required=False,\n default=11,\n )\n\n args = parser.parse_args()\n\n trig = args.trig\n echo = args.echo\n speed = args.speed\n samples = args.samples\n\n return trig, echo, speed, samples", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--thoughtspot_host\", required=True,\n help=\"domain or ip. E.g. 
http://1.1.1.1\")\n parser.add_argument(\"-u\", \"--username\", required=True,\n help=\"username - must have administrative privileges\")\n parser.add_argument(\"-p\", \"--password\", required=True,\n help=\"password - must have administrative privileges\")\n parser.add_argument(\"-d\", \"--delimiter\", default=',',\n help=\"character to seperate values by. Default to comma\")\n parser.add_argument(\"-c\", \"--csv\", action=\"store_true\",\n help=\"create csv file called permissions.csv\")\n parser.add_argument(\"-s\", \"--share\", action=\"store_true\",\n help=\"output usable format for share api\")\n return parser.parse_args()", "def get_command_line_args():\n\tparser: ArgumentParser = argparse.ArgumentParser()\n\tparser.add_argument('-t', '--t_blank', help=\"Number of blanks until delimiter\", default=20)\n\tparser.add_argument('-k', '--k_copynum', help=\"Number of numbers to copy\", default=3)\n\tparser.add_argument('-m', '--model', help=\"Choose model RNN, LSTM or MLP\", default='MLP')\n\targs = parser.parse_args()\n\treturn args", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def cli_arguments():\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n usage=f\"\\n{Color.DETAIL}pdforce.py [-p <pdf>] [-w <wordlist>] [-e <encoding>] [-o <output>] [-c] [-h/--help]{Color.END}\",\n description=f\"{Color.EMPHASIS}{TITLE}\\nLightweight PDF password cracker. USE FOR LEGAL INTENTS ONLY.{Color.END}\",\n epilog=f\"{Color.EMPHASIS}Made by @poponealex - https://github.com/poponealex{Color.END}\",\n )\n\n parser.add_argument(\n \"-p\",\n \"--pdf\",\n type=str,\n help=f\"{Color.INFORMATION}Path to the pdf file.{Color.END}\",\n action=\"store\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-w\",\n \"--wordlist\",\n type=str,\n help=f\"{Color.INFORMATION}Path to the wordlist.{Color.END}\",\n action=\"store\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--encoding\",\n type=str,\n help=f\"{Color.INFORMATION}Specify an encoding for the wordlist (https://docs.python.org/3/library/codecs.html#standard-encodings). The default encoding is platform dependent. Use 'iso8859_1' for rockyou. 
{Color.END}\",\n action=\"store\",\n default=None,\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=f\"{Color.INFORMATION}Output the cracked password to a new file.{Color.END}\",\n action=\"store\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--copy\",\n help=f\"{Color.INFORMATION}Copy the password to the clipboard.{Color.END}\",\n action=\"store_true\",\n )\n\n return parser.parse_args()", "def get_arguments():\n\n # Creates the ArgumentParser\n parser = argparse.ArgumentParser(\n usage='Optimizes a boolean-based ensemble using Univariate Marginal Distribution Algorithm.')\n\n # Adds a dataset argument with pre-defined choices\n parser.add_argument('dataset', help='Dataset identifier', choices=['RSDataset', 'RSSCN7', 'UCMerced_LandUse'])\n\n # Adds a descriptor argument with pre-defined choices\n parser.add_argument('descriptor', help='Descriptor identifier', choices=['global', 'cnn', 'all'])\n\n # Adds an identifier argument to the desired fold identifier\n parser.add_argument('fold', help='Fold identifier', type=int, choices=range(1, 6))\n\n # Adds an identifier argument to the desired number of agents\n parser.add_argument('-n_agents', help='Number of meta-heuristic agents', type=int, default=10)\n\n # Adds an identifier argument to the desired number of iterations\n parser.add_argument('-n_iter', help='Number of meta-heuristic iterations', type=int, default=10)\n\n return parser.parse_args()", "def parse_args(self):\n #-----------------------------------------------------------------------\n #This code is based on code from the KR Toolkit by Christian Muise\n #URL: http://code.google.com/p/krtoolkit/\n try:\n argv, opts, flags = sys.argv[1:], {}, []\n while argv:\n if argv[0][0:2] == '--':\n flags.append(argv[0])\n argv = argv[1:]\n elif argv[0][0] == '-':\n opts[argv[0]] = argv[1]\n argv = argv[2:]\n else:\n raise InputException(\"Badly constructed arg: \" +argv[0])\n except IndexError:\n raise InputException(\"Badly constructed arg: \" + argv[0])\n #-----------------------------------------------------------------------\n for flag in flags:\n if flag in self.program_flags:\n vars(self)[self.program_flags[flag].var_name] = True\n if self.program_flags[flag].function:\n self.program_flags[flag].function(self)\n else:\n raise InputException(\"Invalid flag: \" + flag)\n \n if not self.quiet:\n min_width = max(len('Flags:'),\n max(map(lambda x : len(x.description),\n self.program_args.itervalues()))) + 1\n if len(flags) == 0:\n print \"{:<{}} {}\".format('Flags:', min_width,'<None>')\n else:\n print \"{:<{}} {}\".format('Flags:', min_width,\n ', '.join(filter(lambda f : f in flags,\n self.program_flags)))\n \n for arg in opts:\n if arg not in self.program_args:\n raise InputException(\"Invalid arg: \" + arg)\n \n for arg in self.program_arg_order:\n arg_def = self.program_args[arg]\n if arg not in opts:\n if arg_def.needed:\n raise InputException(\"Error needed arg is missing: \" + arg)\n vars(self)[arg_def.var_name] = arg_def.default_value\n else:\n if arg_def.validator == None:\n vars(self)[arg_def.var_name] = opts[arg]\n else:\n vars(self)[arg_def.var_name] = arg_def.validator(opts[arg],\n arg_def.validator_args)\n if not self.quiet:\n print \"{:<{}} {}\".format(arg_def.description + ':', min_width,\n vars(self)[arg_def.var_name])", "def parse_user_arguments():\n\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-m\", \"--method\",\n help=\"Enter the type of baseline run, \"\n \"bm_25, tf_idf or jm_qlm\", required=True)\n\n ap.add_argument(\"-j\", \"--json_fname\", 
help=\"Enter the path to the json \"\n \"filename containing\"\n \"all the paths to the \"\n \"test_collection\",\n required=True)\n\n return vars(ap.parse_args())", "def get_command_line_args(argv):\n # Initialize the arguments to their default values \n\n args = {'startdate': '20200101',\n 'enddate': '20200102',\n 'outfile': 'test.nc',\n 'dt': 5,\n 'real': True,\n 'south': False,\n 'tcv': False,\n 'substorm': False,\n 'ions': False,\n 'move': False,\n 'cusp': False}\n\n arg_type = {'startdate': str,\n 'enddate': str,\n 'outfile': str,\n 'dt': float,\n 'real': bool,\n 'south': bool,\n 'tcv': bool,\n 'substorm': bool,\n 'ions': bool,\n 'move': bool,\n 'cusp': bool}\n \n # If there is input, set default help to False\n args['help'] = False if len(argv) > 0 else True\n \n # Cycle through all arguments except the first, saving input\n for arg in argv:\n # Treat the file list and formatting seperately\n if arg.find('-') == 0:\n # This is not a filename, remove the dash to get the key\n split_arg = arg.split('=')\n akey = split_arg[0][1:]\n # Get the argument value as the desired type\n if akey not in arg_type.keys():\n raise ValueError(''.join(['unknown command line input, ',\n arg, ', try -help for details']))\n\n if len(split_arg) == 1:\n if arg_type[akey] == bool:\n arg_val = True\n else:\n raise ValueError('expected equality after flag {:}'.format(\n akey))\n else:\n if arg_type[akey] == int:\n arg_val = int(split_arg[1])\n elif arg_type[akey] == float:\n arg_val = float(split_arg[1])\n elif arg_type[akey] == str:\n arg_val = split_arg[1]\n else:\n # This is boolean input\n arg_val = bool_string(split_arg[1])\n\n args[akey] = arg_val\n \n return args", "def parse_command_line():\n parser = argparse.ArgumentParser(\"Falcon Quick Scan\")\n parser.add_argument(\"-f\", \"--config\",\n dest=\"config_file\",\n help=\"Path to the configuration file\",\n required=False\n )\n parser.add_argument(\"-l\", \"--log-level\",\n dest=\"log_level\",\n help=\"Default log level (DEBUG, WARN, INFO, ERROR)\",\n required=False\n )\n parser.add_argument(\"-d\", \"--check-delay\",\n dest=\"check_delay\",\n help=\"Delay between checks for scan results\",\n required=False\n )\n parser.add_argument(\"-p\", \"--pattern\",\n dest=\"pattern\",\n help=\"Target file patterns to scan (defaults to *.*)\",\n required=False\n )\n parser.add_argument(\"-r\", \"--region\",\n dest=\"region\",\n help=\"Region the target bucket resides in\",\n required=False\n )\n parser.add_argument(\"-t\", \"--target\",\n dest=\"target\",\n help=\"Target folder or bucket to scan. 
Bucket must have 's3://' prefix.\",\n required=True\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def get_args():\n parser = argparse.ArgumentParser('Find a featureclass, database, mxd, or service in ArcGIS Server',\n epilog='For search strings inlcuding spaces, enclose the query in double-quotes')\n parser.add_argument('name', help='string for which to search (blank returns info on all services)',\n nargs='?', default='')\n parser.add_argument('-q', '--quiet', help='only display service names and URLs', action='store_true')\n parser.add_argument('-qq', '--veryquiet', help='only display service URLs, comma delimited', action='store_true')\n parser.add_argument('-cs', '--configstore', help='explicitly provide full path to config store', action='store')\n parser.add_argument('-csv', '--tocsv', help='create csv output', action='store_true')\n parser.add_argument('-md', '--markdown', help='create Markdown output', action='store_true')\n return parser.parse_args()", "def main(argv,required_arg,required_arg_type,optional_arg):\n \n # add optional_arguments to the parser\n for option in optional_arg:\n parse_option_dictionary[option]()\n \n # parse the command line\n passed_optional_arg, passed_required_arg = parser.parse_args(argv)\n \n required_arg_values = grabRequiredArgs(passed_required_arg,required_arg,\n required_arg_type)\n\n return required_arg_values, passed_optional_arg", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description=__doc__.strip())\n parser.add_argument(\"sparkylist\", type=str,\n help=\"sparky file to be converted\")\n parser.add_argument(\"output\", type=str, help=\"name of output csv file\")\n parser.add_argument(\"--hch\", action=\"store_true\",\n help=\"hch instead of cch\")\n return parser.parse_args()", "def Get_Arguments():\n parser = argparse.ArgumentParser(description=\"Adds batch, species, subspecies \"\n \"columns to popmap file for summarizing ExDFOIL output\",\n add_help=False)\n\n required_args = parser.add_argument_group(\"Required Arguments\")\n optional_args = parser.add_argument_group(\"Optional Arguments\")\n\n ## Required Arguments\n required_args.add_argument(\"-p\", \"--popmap\",\n type=str,\n required=True,\n help=\"String; Tab-separated popmap file: indID\\tpopID\")\n\n ## Optional Arguments\n optional_args.add_argument(\"-b\", \"--batch\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing batchIDs\")\n optional_args.add_argument(\"-S\", \"--species\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing speciesIDs\")\n optional_args.add_argument(\"-s\", \"--subspecies\",\n type=str,\n required=False,\n default=None,\n nargs=\"?\",\n help=\"Filename containing subspeciesIDs\")\n 
optional_args.add_argument(\"-o\", \"--outfile\",\n type=str,\n required=False,\n default=\"mysampleinfo.txt\",\n nargs=\"?\",\n help=\"Specify output filename; default=mysampleinfo.txt\")\n optional_args.add_argument(\"-h\", \"--help\",\n action=\"help\",\n help=\"Displays this help menu\")\n\n\n args = parser.parse_args()\n\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run NCF..\")\n parser.add_argument(\n \"--config_file\",\n nargs=\"?\",\n type=str,\n default=\"../configs/ncf_default.json\",\n help=\"Specify the config file name. Only accept a file from ../configs/\",\n )\n # If the following settings are specified with command line,\n # These settings will used to update the parameters received from the config file.\n parser.add_argument(\n \"--dataset\",\n nargs=\"?\",\n type=str,\n help=\"Options are: tafeng, dunnhunmby and instacart\",\n )\n parser.add_argument(\n \"--data_split\",\n nargs=\"?\",\n type=str,\n help=\"Options are: leave_one_out and temporal\",\n )\n parser.add_argument(\n \"--root_dir\", nargs=\"?\", type=str, help=\"working directory\",\n )\n parser.add_argument(\n \"--emb_dim\", nargs=\"?\", type=int, help=\"Dimension of the embedding.\"\n )\n parser.add_argument(\"--lr\", nargs=\"?\", type=float, help=\"Intial learning rate.\")\n parser.add_argument(\"--max_epoch\", nargs=\"?\", type=int, help=\"Number of max epoch.\")\n parser.add_argument(\n \"--batch_size\", nargs=\"?\", type=int, help=\"Batch size for training.\"\n )\n parser.add_argument(\"--optimizer\", nargs=\"?\", type=str, help=\"OPTI\")\n parser.add_argument(\"--activator\", nargs=\"?\", type=str, help=\"activator\")\n parser.add_argument(\"--alpha\", nargs=\"?\", type=float, help=\"ALPHA\")\n return parser.parse_args()", "def readArgs():\n args = sys.argv\n if len(args) != 3:\n print(\"ERROR - Wrong number of arguments! \\n\")\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n exit(5)\n if args[1] != \"MTS\" and args[1] != \"SCH\":\n print(\"ERROR - Wrong type specified! 
: \" + args[1])\n print(\"Usage: plotGantt.py TYPE path/to/result/file.gantt \\n where TYPE is : MTS / SCH\")\n return args", "def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Check the ERC 20 conformance\", usage=\"fortress-check-erc project contractName\",\n )\n\n parser.add_argument(\"project\", help=\"The codebase to be tested.\")\n\n parser.add_argument(\n \"contract_name\",\n help=\"The name of the contract. Specify the first case contract that follow the standard. 
Derived contracts will be checked.\",\n )\n\n parser.add_argument(\n \"--erc\",\n help=f\"ERC to be tested, available {','.join(ERCS.keys())} (default ERC20)\",\n action=\"store\",\n default=\"erc20\",\n )\n\n parser.add_argument(\n \"--json\",\n help='Export the results as a JSON file (\"--json -\" to export to stdout)',\n action=\"store\",\n default=False,\n )\n\n # Add default arguments from crytic-compile\n cryticparser.init(parser)\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert environment variables in to a configuration file')\n parser.add_argument('-p',\n '--prefix',\n help='Prefix of env vars to parse',\n required=True)\n parser.add_argument('-f',\n '--format',\n help='Output file format',\n default='ini',\n choices=['ini', 'json'])\n parser.add_argument('-o',\n '--output-file',\n help='Outfile file path',\n default='/dev/stdout')\n parser.add_argument(\n '-r',\n '--reference-file',\n type=argparse.FileType('r'),\n help='Load this reference file for existing/hard coded values')\n\n return parser.parse_args()", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Scraper')\n parser.add_argument('--prefix', help='Prefix for saving files', default=\"\")\n parser.add_argument('--path', help='Dir path', default=\"\")\n parser.add_argument('--urls_path', help='Url path', default=False)\n parser.add_argument('--url', help='Url', default=False)\n parser.add_argument('--disney', dest='disney', action='store_true', help=\"Choose all disney movies\")\n parser.add_argument('--ngram', help='Max ngram', default=2)\n\n args = parser.parse_args()\n return args", "def get_args():\n if len(sys.argv) == 3:\n return sys.argv[1:]\n print(\"USAGE: python3 extract_cds.py infile outfile\\n\\n\")\n exit()", "def main_argv():\n main_parse_args(sys.argv[1:])", "def main_argv():\n main_parse_args(sys.argv[1:])", "def check_argv():\n parser = argparse.ArgumentParser(description=__doc__.strip().split(\"\\n\")[0], add_help=False)\n parser.add_argument(\"keywordslist\", help=\"Numpy output file\")\n parser.add_argument(\"dirs\", help=\"Numpy output file\")\n parser.add_argument(\"npz_train\", help=\"Numpy output file\")\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"file with the cohort you want to check / fix\", type=str, required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"where should the files and the result readme be stored?\", type=str, required=True)\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Assemble raw reads using ABySS\"\"\"\n )\n parser.add_argument(\n \"--output\",\n required=True,\n action=FullPaths,\n default=None,\n help=\"\"\"The directory in which to store the assembly data\"\"\",\n )\n parser.add_argument(\n \"--kmer\", type=int, default=31, help=\"\"\"The kmer value to use\"\"\"\n )\n parser.add_argument(\n \"--cores\",\n type=int,\n default=1,\n help=\"\"\"The number of compute cores/threads to run with Trinity\"\"\",\n )\n parser.add_argument(\n \"--subfolder\",\n type=str,\n default=\"\",\n help=\"\"\"A subdirectory, below the level of the group, containing the reads\"\"\",\n )\n parser.add_argument(\n \"--verbosity\",\n type=str,\n choices=[\"INFO\", \"WARN\", \"CRITICAL\"],\n default=\"INFO\",\n help=\"\"\"The logging level to use\"\"\",\n )\n parser.add_argument(\n \"--log-path\",\n action=FullPaths,\n type=is_dir,\n default=None,\n help=\"\"\"The path to a directory to hold logs.\"\"\",\n )\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Cleanup all intermediate Trinity files\"\"\",\n )\n parser.add_argument(\n \"--abyss-se\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Only use abyss-se\"\"\",\n )\n # one of these is required. 
The other will be set to None.\n input_data = parser.add_mutually_exclusive_group(required=True)\n input_data.add_argument(\n \"--config\",\n type=is_file,\n action=FullPaths,\n default=None,\n help=\"\"\"A configuration file containing reads to assemble\"\"\",\n )\n input_data.add_argument(\n \"--dir\",\n type=is_dir,\n action=FullPaths,\n default=None,\n help=\"\"\"A directory of reads to assemble\"\"\",\n )\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. 
to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def get_args_from_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--country_code\", type=str,\n help=\"Country code\",\n default=\"US\")\n parser.add_argument(\"--n_workers\", type=int, help=\"number of workers\",\n default=20)\n parser.add_argument(\"--survey_link\", type=str)\n parser.add_argument(\"--block_size\", help='number of tweets per worker', type=int)\n parser.add_argument(\"--version_number\", type=str)\n parser.add_argument(\"--mode\", type=str, help='Whether to create HIT in sandbox or in production')\n parser.add_argument(\"--language_qualification\", type=int, help='')\n\n args = parser.parse_args()\n return args", "def main():\n args = parse_args()\n process_args(args)", "def parse_arguments():\n\n text_folder = \"the unzipped Slack export directory\"\n text_remote_name = \"keep Slack file IDs instead of using the file names\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"folder\", help=text_folder)\n parser.add_argument(\"--remote-name\", help=text_remote_name,\n action=\"store_true\")\n\n arguments = parser.parse_args()\n return arguments", "def command_line_args(parser):\n AbyssAssembler.command_line_args(parser)\n SpadesAssembler.command_line_args(parser)\n TrinityAssembler.command_line_args(parser)\n VelvetAssembler.command_line_args(parser)", "def get_args():\n parser = argparse.ArgumentParser(\n description = \"Make consensus taxonomy out of a usearch tophits map\")\n # Add arguments\n parser.add_argument(\"input\",\n help = \"input file in usearch's UC format.\")\n parser.add_argument(\"-t\",\n \"--tax_separator\",\n help = \"character separating taxonomic levels.\",\n required = True)\n parser.add_argument(\"-s\",\n \"--tax_sense\",\n choices = ['asc', 'desc'],\n help = \"sense of taxonomic levels in your database. 'asc' for lower to higher levels (e.g. ID_Diatomea_Stramenopiles_SAR_Eukaryota), 'desc' for higher to lower levels (e.g. Eukaryota_SAR_Stramenopiles_Diatomea_ID).\",\n required = True)\n parser.add_argument(\"-p\",\n \"--pair_separator\",\n help = \"pair (forward & reverse) character separator. Use this argument to remove redundancies from your dataset (i.e. reads that are represented for both forward and reverse pairs).\",\n required = False,\n default = None)\n parser.add_argument(\"-o\",\n \"--output_file\",\n help = \"path to output file where filtered map should be written. 
It defaults to `filtered_map.uc`\",\n required = False,\n default = 'filtered_map.uc')\n # Array for all arguments passed to script\n args = parser.parse_args()\n # Assign args to variables\n input = args.input\n tax_separator = args.tax_separator\n tax_sense = args.tax_sense\n pair_separator = args.pair_separator\n outfile = args.output_file\n # Return all variable values\n return input, tax_separator, tax_sense, pair_separator, outfile", "def parse_arguments():\n description = 'Code checkout script for NEMSfv3gfs'\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--config', action='store', help='name of checkout config', \n default='checkout_nemsfv3gfs.cfg')\n args = parser.parse_args()\n return args.config", "def parse_arguments():\n p = argparse.ArgumentParser(description='Prepare the dataset for use by neural models.')\n p.add_argument(\"json_file\", type=argparse.FileType('r'), help=\"json file with all the data\")\n p.add_argument(\"prefix\", type=str, help=\"prefix for all the generated files\")\n p.add_argument(\"data_type\", type=str, choices=[\"names\", \"comments\", \"nc\"],\n default=\"nc\", help=\"type of the information recorded in the dataset\")\n p.add_argument(\"labels\", type=str, choices=[\"PROG\", \"ALL\", \"TOP\"],\n default=\"PROG\", help=\"method by which to choose the labels for the dataset\")\n p.add_argument(\"-other_label\", type=str, required=False, default=\"\",\n help=\"label to use instead of all infrequent labels. \"\n \"This can be left blank to ignore infrequent labels altogether\")\n p.add_argument(\"-label_num\", type=int, default=100, required=False,\n help=\"Number of most frequent labels to keep. Works with label_choice=TOP\")\n p.add_argument(\"-min_prog_labels\", type=int, default=5, required=False,\n help=\"Minimal number of programs a label has to appear in for it to be included \"\n \"in the dataset. 
Works with label_choice=PROG\")\n p.add_argument(\"-test_prog_list\", type=argparse.FileType('r'), default=None, required=False,\n help=\"file with the list of programs in the test set (optional)\")\n\n return p.parse_args(sys.argv[1:])", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"Remove the UCE locus name from nexus alignments.\"\"\"\n )\n parser.add_argument(\n \"--alignments\",\n required=True,\n action=FullPaths,\n type=is_dir,\n help=\"\"\"The input directory containing nexus files to filter\"\"\",\n )\n parser.add_argument(\n \"--output\",\n required=True,\n action=CreateDir,\n help=\"\"\"The output directory to hold the converted nexus files\"\"\",\n )\n parser.add_argument(\n \"--input-format\",\n choices=[\n \"fasta\",\n \"nexus\",\n \"phylip\",\n \"phylip-relaxed\",\n \"clustal\",\n \"emboss\",\n \"stockholm\",\n ],\n default=\"nexus\",\n help=\"\"\"The input alignment format.\"\"\",\n )\n parser.add_argument(\n \"--output-format\",\n choices=[\n \"fasta\",\n \"nexus\",\n \"phylip\",\n \"phylip-relaxed\",\n \"clustal\",\n \"emboss\",\n \"stockholm\",\n ],\n default=\"nexus\",\n help=\"\"\"The output alignment format.\"\"\",\n )\n parser.add_argument(\n \"--verbosity\",\n type=str,\n choices=[\"INFO\", \"WARN\", \"CRITICAL\"],\n default=\"INFO\",\n help=\"\"\"The logging level to use.\"\"\",\n )\n parser.add_argument(\n \"--log-path\",\n action=FullPaths,\n type=is_dir,\n default=None,\n help=\"\"\"The path to a directory to hold logs.\"\"\",\n )\n parser.add_argument(\n \"--cores\",\n type=int,\n default=1,\n help=\"\"\"Process alignments in parallel using --cores for alignment. \"\"\"\n + \"\"\"This is the number of PHYSICAL CPUs.\"\"\",\n )\n parser.add_argument(\n \"--binary\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Output alignments in binary encoding (A or G) = 1 and (C or T) = 0\"\"\",\n )\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--training', type=argparse.FileType(), default='./data/cdt.epe',\n help='epe file with training sentences.')\n parser.add_argument('--validation', type=argparse.FileType(), default='./data/cdd.epe',\n help='epe file with validation sentences.')\n parser.add_argument('--metrics', type=argparse.FileType(\"w\"), default=sys.stdout,\n help='where to output metrics computed on validation.')\n parser.add_argument('--mode', choices=['BASELINE', 'BASELINE_C', 'BASELINE_C_POS'], default='BASELINE',\n help='mode in which model is trained, C adds cue information, POS adds pos tags.')\n parser.add_argument('--arch', choices=['LSTM', 'GRU'], default='LSTM',\n help='which recurrent network architecture to use.')\n parser.add_argument('--word_vectors', default=None,\n help='file with word embeddings')\n parser.add_argument('--epochs', type=int, default=10,\n help='number of epochs to train the model.')\n parser.add_argument('--lr', type=float, default=1e-4,\n help='learning rate used in adam optimizer.')\n parser.add_argument('--max_len', type=int, default=100,\n help='maximum length of sentence.')\n parser.add_argument('--hidden_size', type=int, default=200,\n help='memory size of recurrent neural network.')\n parser.add_argument('--negations_only', action='store_true', default=False,\n help='if present, only sentences with at least one negation are used.')\n parser.add_argument('--tensorboard', action='store_true', default=False)\n parser.add_argument('--save', default=None, help='directory where to save model.')\n\n return parser.parse_args()", 
"def get_args():\n parser = argparse.ArgumentParser(description='\"Some Input argu for Generate Fragsize Distribution.')\n parser.add_argument('-i', '--input', required=True, action='store',\n help=\"Input File of .molucule.table\")\n parser.add_argument('-o', '--output', required=True, action='store',\n help=\"Output File of .fragsize_distribution.txt\")\n \n return parser.parse_args()", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def parse_cmdline_args():\n parser = argparse.ArgumentParser(description=\"Guesses the functional element for host.\")\n ##\n ## Internal options\n ##\n parser.add_argument(\"--json\", dest=\"json\", action='store_true', help=\"output in JSON\")\n\n ##\n ## PuppetDB options\n ##\n pdbconf = PdbConfig()\n pdbconf.add_standard_args(parser)\n\n parser.add_argument(\"host\", metavar=\"HOST\",\n help=\"hostnames to query for FE\")\n\n return parser.parse_args()", "def parse_command_line():\n\n desc = \"Perform fluid dynamics simulations.\"\n parser = argparse.ArgumentParser(description=desc)\n\n # Parameter file\n help_txt = \"name of the configuration file (default is 'config.ini.')\"\n parser.add_argument(\"-f\", \"--file\", metavar=\"FILE\", default=\"config.ini\",\n required=False, dest=\"config_file\", help=help_txt)\n\n return parser.parse_args()", "def get_args(argv):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"design\", help=\"design file name\")\r\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\r\n help=\"print a verbose report\")\r\n return parser.parse_args(argv)", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"pop-nedry Win64 shellcode build script\"\n )\n\n parser.add_argument(\n '--url', type=str, required=True,\n help='URL for web page hosting the Nedry GIF'\n )\n\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(\n description=\"Obstacle avoidance python script.\"\n )\n\n # Required arguments\n parser.add_argument(\"-n\", \"--number\",\n action=\"store\",\n required=False,\n help=\"Add a pheeno number namespace.\",\n default=\"\")\n\n # The rationale behind rospy.myargv()[1:] is provided here:\n # https://groups.google.com/a/rethinkrobotics.com/forum/#!topic/brr-users/ErXVWhRmtNA\n return parser.parse_args(rospy.myargv()[1:])", "def handle_cmdline():\n\n cmdline = ArgumentParser(init_args=['address', 'arch', 'file'],\n address_required=True, address_default=None,\n file_required=True,\n file_help='Flash or memory image to inspect',\n formatter_class=RawDescriptionHelpFormatter,\n 
usage=_USAGE, description=_DESCRIPTION, epilog=_EPILOG)\n\n cmdline.add_argument('--longhelp',\n choices=['Y', 'N'],\n default=None,\n help=_LONGHELP_TEXT)\n\n cmdline.add_argument('--autocomplete',\n choices=['Y', 'N'],\n default=None,\n help=_AUTOCOMPLETE_TEXT)\n\n cmdline.add_argument('--threshold',\n type=int,\n default=5,\n help='Minimum table size to report. Default: 5')\n\n cmdline.add_argument('--subcmds',\n action='store_true',\n default=False,\n help='Include sub-command tables in displayed results')\n\n cmdline.add_argument('--details',\n action='store_true',\n default=False,\n help='Display more detailed output')\n\n args = cmdline.parse_args()\n\n if args.longhelp is not None:\n args.longhelp = args.longhelp == 'Y'\n\n if args.autocomplete is not None:\n args.autocomplete = args.autocomplete == 'Y'\n\n return args", "def get_args():\n parser = argparse.ArgumentParser(description='Simple dmenu launcher for passwords, notes and application shortcuts.')\n group = parser.add_mutually_exclusive_group()\n\n group.add_argument('--pass', dest='passw', action='store_true',\n help='Copy password from password store.')\n group.add_argument('--apps', action='store_true',\n help='Quick launches a desktop application with exo-open.')\n group.add_argument('--notes', action='store_true',\n help='Opens a text/markdown note from a given directory with exo-open.')\n group.add_argument('--search', action='store_true',\n help='Quick search and launch from a given directory with exo-open.')\n\n if not len(sys.argv) > 1:\n parser.print_help()\n sys.exit(1)\n\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... 
copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def get_arguments() -> argparse.Namespace:\n\n parser = argparse.ArgumentParser(\n description=\"\"\"\n train a network for image classification with Flowers Recognition Dataset.\n \"\"\"\n )\n parser.add_argument(\"config\", type=str, help=\"path of a config file\")\n parser.add_argument(\n \"--resume\",\n action=\"store_true\",\n help=\"Add --resume option if you start training from checkpoint.\",\n )\n parser.add_argument(\n \"--use_wandb\",\n action=\"store_true\",\n help=\"Add --use_wandb option if you want to use wandb.\",\n )\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"Add --debug option if you want to see debug-level logs.\",\n )\n parser.add_argument(\n \"--seed\",\n type=int,\n default=42,\n help=\"random seed\",\n )\n\n return parser.parse_args()", "def parseArgs ():\n independentBaseName = None\n dependentBaseName = None\n independentTSID = None\n dependentTSID = None\n statisticsFile = None\n nEquations = None\n logFile = None\n #\n # Loop through command line arguments\n for arg in sys.argv:\n parts = arg.split('=')\n if ( (parts == None) or (len(parts) != 2) ):\n # Not an arg=value command line argument\n continue\n argName = parts[0].upper()\n argValue = parts[1]\n if ( argName == 'DEPENDENTBASENAME' ):\n dependentBaseName = argValue\n elif ( argName == 'DEPENDENTTSID' ):\n dependentTSID = argValue\n elif ( argName == 'INDEPENDENTBASENAME' ):\n independentBaseName = argValue\n elif ( argName == 'INDEPENDENTTSID' ):\n independentTSID = argValue\n elif ( argName == 'LOGFILE' ):\n logFile = argValue\n elif ( argName == 'NUMBEROFEQUATIONS' ):\n nEquations = int(argValue)\n elif ( argName == 'STATISTICSFILE' ):\n statisticsFile = argValue\n return ( independentBaseName, dependentBaseName, independentTSID, dependentTSID,\n statisticsFile, nEquations, logFile )" ]
[ "0.76032376", "0.7410202", "0.7344581", "0.7313167", "0.7288181", "0.7207524", "0.7184706", "0.71445954", "0.71318215", "0.7104654", "0.70891064", "0.70799434", "0.7070761", "0.7066492", "0.704149", "0.70361483", "0.7026928", "0.7025998", "0.70166886", "0.7002696", "0.70009625", "0.6995386", "0.6970411", "0.69629854", "0.6961684", "0.695171", "0.6934007", "0.69259906", "0.6923635", "0.69190735", "0.6915411", "0.6907029", "0.69052905", "0.6895559", "0.6895273", "0.68860304", "0.6870893", "0.6870171", "0.68686956", "0.686709", "0.6866107", "0.68621945", "0.6860393", "0.68593293", "0.6855721", "0.6850251", "0.68480295", "0.6846014", "0.68434656", "0.68428916", "0.6840806", "0.6838989", "0.68376315", "0.6824273", "0.68240225", "0.6816259", "0.6811111", "0.6810989", "0.68077147", "0.68065774", "0.6803242", "0.6802983", "0.6801771", "0.6801389", "0.68009025", "0.6799708", "0.6799688", "0.67996675", "0.6798891", "0.6797676", "0.67962104", "0.6795101", "0.6795101", "0.6790043", "0.678985", "0.6789695", "0.67883146", "0.6780663", "0.67800266", "0.677976", "0.6778977", "0.6777294", "0.6777132", "0.6776821", "0.6775984", "0.67734987", "0.677336", "0.67733306", "0.67727774", "0.6771131", "0.677112", "0.67675704", "0.6763096", "0.67609656", "0.6759626", "0.6753658", "0.67532927", "0.6753226", "0.6751637", "0.67454183", "0.6743543" ]
0.0
-1
Parse arguments, filter regulon, and print to stdout.
def main():
    # get commmand line args
    args = parse_arguments()

    adj_file = args.adj # open("UCSC_VIPER/pathways/extended_pathways_transcriptional.adj", "r")

    # this set isn't actually used in the script, but I was curious...
    adj_gene_set = set()

    cutoff_number = args.cutoff_number
    #cutoff_percent = args.cutoff_percent

    expr_gene_file = args.expr_genes #open("stanford_batchK1-12.HUGO_only_genes.lst", 'r')
    expr_genes = [line.strip() for line in expr_gene_file]

    # for each line, check that the regulator and other genes are in the
    # expression matrix gene set. if not, remove them, or remove the whole
    # line if the regulator isn't in the set or if too few genes remain
    for line in adj_file:
        line_list = line.strip().split()
        regulator_plus_gene_list = [x for x in line_list if x !="1.0"]
        regulator = regulator_plus_gene_list[0]
        if regulator not in expr_genes:
            # remove the whole regulator + regulon
            print("Skipped a line (regulator not in expr genes): ", line_list[0], file=sys.stderr)
            continue
        gene_list = regulator_plus_gene_list[1:]
        list_size = len(gene_list)
        adj_gene_set.update(gene_list)
        how_many_to_remove= 0
        good_genes = []
        for gene in gene_list:
            if gene not in expr_genes:
                how_many_to_remove += 1
            else:
                good_genes.append(gene)
        #print("\n")
        #pdb.set_trace()
        #if (100-how_many_to_remove/list_size*100 < cutoff_percent) and (list_size-how_many_to_remove < cutoff_number):
        if (list_size-how_many_to_remove < cutoff_number):
            print("Skipped a line (too many removed): ", line_list[0], file=sys.stderr)
        else:
            # re-generate the new line of the .adj file with kept genes
            #genes_to_print = good_genes.insert(0, regulator)
            regulated_genes = "\t1.0\t".join(good_genes)
            print(regulator+"\t"+regulated_genes+"\t1.0")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def main():\n filter_freq = 1.e4\n re_sample_freq = 1.e5\n glob_search = '*.log'\n\n # parse the command line arguments\n parser = argparse.ArgumentParser(description=\"Filters files in a directory based on a file extension.\")\n parser.add_argument('-d', '--directory', type=str, nargs=1,\n help=\"directory of files to filter. Default is the current directory.\")\n parser.add_argument('-ff', '--filter-freq', type=float, nargs=1,\n help=\"low-pass filter frequency cutoff. Default is {0} Hz\".format(filter_freq))\n parser.add_argument('-osr', '--out-sample-rate', type=float, nargs=1,\n help=\"output sample rate. Default is {0} Hz\".format(re_sample_freq))\n parser.add_argument('-g', '--glob', type=str, nargs=1,\n help=\"Unix pattern to search for files in the directory. Default is \\'*.log\\', which finds all\"\n \" files with a '.log' extension. Must surround with quotes.\")\n parser.add_argument('-r', '--recursive', action='store_true',\n help=\"search for files recursively.\")\n args = parser.parse_args()\n\n directory = '.'\n # Use the command line arguments to set our variables, if necessary.\n if args.directory is not None:\n directory = args.directory[0]\n\n if args.filter_freq is not None:\n filter_freq = args.filter_freq[0]\n\n if args.out_sample_rate is not None:\n re_sample_freq = args.out_sample_rate[0]\n\n if args.glob is not None:\n glob_search = args.glob[0]\n print glob_search\n\n # find all of the files in the current directory with .log extension.\n files = []\n for root, dirname, filenames in os.walk(directory):\n for filename in fnmatch.filter(filenames, glob_search):\n files.append(os.path.join(root, filename))\n # Only do top level directory, unless recursive is specified.\n if not args.recursive:\n break\n\n print \"Filter frequency: {0} Hz\".format(filter_freq)\n print \"Output sample frequency: {0} Hz\".format(re_sample_freq)\n print \"Glob search: {0}\".format(glob_search)\n print \"Recursive: {0}\".format(args.recursive)\n print \"Filtering these files:\", files\n print \"\\n----------------------------\\n\"\n\n p = Pool()\n\n # add the file names and filter frequency and output sample rate to a tuple to pass in multiprocessing\n pool_args = []\n for filename in files:\n tup = (filename, filter_freq, re_sample_freq)\n pool_args.append(tup)\n\n # filter each file\n output_file_names = p.map(_filter_wrap, pool_args)\n\n print \"\\n----------------------------\\n\"\n print \"Output files:\", output_file_names", "def usage():\r\n print(\"Usage:\", end=' ')\r\n print(\"lab4.py <search directory> <regular expression filter>\")", "def output_main(args):\n\t#clean input file (fold and remove escape chars)\n\treference = clean_fasta(args.infile)\n\tfilterthreshold = args.threshold\n\t#look up proper readset using readset module\n\treadset = args.readset\n\t#if readset is in fasta format, inject fake quality scores\n\t\n\t#run bwa\n\tsamfile = run_bwa(reference, readset)\n\t#convert sam to bam file, and sort\n\tsortedbam = sam_to_sorted_bam(reference, samfile)\n\t#run variant caller freebayes\n\tvcffile = run_var_caller(reference, sortedbam)\n\t#run hapcut suite\n\thapoutfile = run_haplotyper(reference, vcffile, sortedbam, filterthreshold)\n\t#convert hapcut output to sequence and gff\n\tcalls_to_gff(reference, hapoutfile)", "def main():\r\n arguments = collections.deque(sys.argv)\r\n arguments.popleft()\r\n file_name = arguments.popleft()\r\n if len(arguments) == 0:\r\n print(\"Please enter 
filter commands to run the program. (host, port, ip, tcp, udp, icmp, net)\")\r\n sys.exit()\r\n packets_info = read_and_store_pcap(file_name)\r\n handle_commands(packets_info, arguments)", "def main():\n parser = argparse.ArgumentParser(\n description='Executes a filter from the command-line. Calls JVM start/stop automatically.')\n parser.add_argument(\"-j\", metavar=\"classpath\", dest=\"classpath\", help=\"additional classpath, jars/directories\")\n parser.add_argument(\"-X\", metavar=\"heap\", dest=\"heap\", help=\"max heap size for jvm, e.g., 512m\")\n parser.add_argument(\"-i\", metavar=\"input1\", dest=\"input1\", required=True, help=\"input file 1\")\n parser.add_argument(\"-o\", metavar=\"output1\", dest=\"output1\", required=True, help=\"output file 1\")\n parser.add_argument(\"-r\", metavar=\"input2\", dest=\"input2\", help=\"input file 2\")\n parser.add_argument(\"-s\", metavar=\"output2\", dest=\"output2\", help=\"output file 2\")\n parser.add_argument(\"-c\", metavar=\"classindex\", default=\"-1\", dest=\"classindex\", help=\"1-based class attribute index\")\n parser.add_argument(\"filter\", help=\"filter classname, e.g., weka.filters.AllFilter\")\n parser.add_argument(\"option\", nargs=argparse.REMAINDER, help=\"additional filter options\")\n parsed = parser.parse_args()\n if parsed.input2 is None and not parsed.output2 is None:\n raise Exception(\"No second input file provided ('-r ...')!\")\n\n jars = []\n if not parsed.classpath is None:\n jars = parsed.classpath.split(os.pathsep)\n params = []\n if not parsed.input1 is None:\n params.extend([\"-i\", parsed.input1])\n if not parsed.output1 is None:\n params.extend([\"-o\", parsed.output1])\n if not parsed.input2 is None:\n params.extend([\"-r\", parsed.input2])\n if not parsed.output2 is None:\n params.extend([\"-s\", parsed.output2])\n if not parsed.classindex is None:\n params.extend([\"-c\", parsed.classindex])\n\n jvm.start(jars, max_heap_size=parsed.heap, packages=True)\n\n logger.debug(\"Commandline: \" + utils.join_options(sys.argv[1:]))\n\n try:\n flter = Filter(parsed.filter)\n if len(parsed.option) > 0:\n flter.set_options(parsed.option)\n loader = Loader(classname=\"weka.core.converters.ArffLoader\")\n in1 = loader.load_file(parsed.input1)\n cls = parsed.classindex\n if str(parsed.classindex) == \"first\":\n cls = \"0\"\n if str(parsed.classindex) == \"last\":\n cls = str(in1.num_attributes() - 1)\n in1.set_class_index(int(cls))\n flter.set_inputformat(in1)\n out1 = flter.filter(in1)\n saver = Saver(classname=\"weka.core.converters.ArffSaver\")\n saver.save_file(out1, parsed.output1)\n if not parsed.input2 is None:\n in2 = loader.load_file(parsed.input2)\n in2.set_class_index(int(cls))\n out2 = flter.filter(in2)\n saver.save_file(out2, parsed.output2)\n except Exception, e:\n print(e)\n finally:\n jvm.stop()", "def main():\n\n try:\n args = parse()\n\n is_match = match(args.regex, args.text)\n print(is_match)\n except Exception as err:\n # Print catch-all error message\n print(f\"[{type(err).__name__}]: {err}.\", file=sys.stderr)", "def main():\n lines = read_syslog()\n if len(sys.argv) > 1:\n lines = filter_logs(sys.argv[1], lines)\n for line in lines:\n print(line)", "def do_scan(self, args):\n args = args.split()\n if len(args) != 1:\n print 'usage: scan pat'\n return\n pat = args[0]\n print 'pat: \"%s\"' % pat\n self.regexprutils.scan(pat)", "def commandEcho(*args, addFilter: List[AnyStr]=None, filter: Union[List[AnyStr], bool]=None,\n lineNumbers: bool=True, state: bool=True, q=True, query=True,\n 
**kwargs)->Union[None, Any]:\n pass", "def main():\n # Default input parameters\n nelx, nely, volfrac, penalty, rmin, ft = cli.parse_args()\n cli.main(nelx, nely, volfrac, penalty, rmin, ft)\n # Vary the filter radius\n for scaled_factor in [0.25, 2]:\n cli.main(nelx, nely, volfrac, penalty, scaled_factor * rmin, ft)\n # Vary the penalization power\n for scaled_factor in [0.5, 4]:\n cli.main(nelx, nely, volfrac, scaled_factor * penalty, rmin, ft)\n # Vary the discreization\n for scale_factor in [0.5, 2]:\n cli.main(int(scale_factor * nelx), int(scale_factor * nely),\n volfrac, penalty, rmin, ft)", "def __main__():\n parser = argparse.ArgumentParser(description='basic output parser', usage='%(prog)s -i input.xml -o output.csv')\n parser.add_argument('--input', '-i', dest='infile', help='file to input xml from')\n parser.add_argument('--output', '-o', dest='outfile', default='output.csv', help='file to output csv to')\n parser.add_argument('--version', '-v', action='version', version='%(prog)s 0.1')\n args = parser.parse_args()\n axmlfile = args.infile\n acsvfile = args.outfile\n\n if not args.infile:\n sys.exit(parser.print_help())\n\n dosomeworkslacker(axmlfile, acsvfile)", "def main(self):\n\n argprs = argvparse.Argparse()\n\n lines = []\n\n if not argprs.files:\n self.read_user_input()\n lines.append(self.commandline)\n self.send_lines_to_finditer(argprs.regex, lines,\n argprs.underscore, argprs.color, argprs.machine)\n else:\n # print argprs.files\n for fl in argprs.files:\n try:\n filerd = fileread.Fileread(fl)\n self.send_lines_to_finditer(argprs.regex, filerd.lines,\n argprs.underscore, argprs.color, argprs.machine,\n filerd.shortfilename)\n except Exception as e:\n print str(e), \"\\n\"", "def filter(self, *args):\n exps = []\n\n #this ones can be improved. 
some are not getting extracted\n if \"retweets\" in args:\n exps.append(re.compile(\"^RT ?(?<=^|(?<=[^a-zA-Z0-9-_\\.]))@([A-Za-z]+[A-Za-z0-9-_]+):\"))\n if \"emoticons\" in args:\n exps.append(\"emoticons\")\n if \"flags\" in args:\n exps.append(re.compile(u\"[\\U0001F1E6-\\U0001F1FF]\"))\n if \"handles\" in args:\n # Handles at start of string\n exps.append(re.compile(\"^\\s*((?<=^|(?<=[^\\S]))@([\\S]+)\\s*)*\"))\n # Handles at end of string\n exps.append(re.compile(\"\\s+((?<=^|(?<=[^\\S]))@(\\S+)\\s*)*$\"))\n if \"urls\" in args:\n exps.append(re.compile(\"(https?|ftp)://[^\\s/$.?#].[^\\s]*\"))\n if \"hashtags\" in args:\n # Hastags at start of string\n exps.append(re.compile(\"^\\s*((?<=^|(?<=[^\\S]))#([\\S]+)\\s*)*\"))\n # Hashtags at end of string\n exps.append(re.compile(\"\\s+((?<=^|(?<=[^\\S]))#(\\S+)\\s*)*$\"))\n\n\n # Use all filters\n if \"*\" in args and not exps:\n return self.filter(\"retweets\", \"emoticons\", \"flags\", \"handles\", \"urls\", \"hashtags\")\n\n filtering_text = self.raw_text\n\n for expression in exps:\n if expression == \"emoticons\":\n filtering_text = ''.join(c for c in filtering_text if c not in emoji.UNICODE_EMOJI)\n else:\n filtering_text = re.sub(expression, \"\", filtering_text)\n\n # Remove extra spaces\n self.clean_text = re.sub(r\"\\s\\s+\", ' ', filtering_text.strip())\n return self.clean_text", "def main():\n inputs = []\n files = set()\n\n args = parseArguments()\n\n # Configure the stdout logger\n logging.basicConfig(format=\"%(filename)s: %(levelname)s: %(message)s\",\n level=logging.DEBUG)\n\n try:\n # Create a list of input format objects\n for gcsv in args.gcsv.split():\n inputs.append(GoogleCSV(gcsv))\n for plain in args.plain.split():\n inputs.append(Plain(plain))\n\n # Get the URLs\n urls = mergeURLS(inputs)\n\n # Get the files\n for dir in args.dirs.split():\n files = files.union(formatFiles(dir, args.utc, args.ext))\n\n # Search for matches\n redirects = fuzzySearch(urls, files, args.matches, args.cutoff)\n\n except Exception as e:\n logging.error(e)\n\n if args.output == \"csv\":\n out = CSV(redirects, args.subdomain)\n elif args.output == \"rack\":\n out = Rack(redirects, args.subdomain)\n else:\n out = OutputFormat(redirects, args.subdomain)\n\n print(out)", "def argParse():\n p = ap.ArgumentParser()\n p.add_argument('field',\n help='Name of field')\n p.add_argument('telescope',\n help='Name of telescope',\n choices=['io', 'callisto', 'europa',\n 'ganymede', 'artemis', 'saintex',\n 'nites', 'rcos20'])\n p.add_argument('filt',\n help='Name of filter')\n return p.parse_args()", "def filter_macro(out, scope, args, children):\n len(args) == 2 or syntax_error(\"'filter' macro takes exactly 2 arguments.\")\n regex, l = args\n if not isinstance(l, list):\n syntax_error(\"Invalid list in 'filter' macro: '%s'\" % str(list))\n if not isinstance(regex, str):\n syntax_error(\"Invalid regex in 'filter' macro: '%s'\" % str(regex))\n def match(s):\n return re.search(regex, s)\n return list(filter(match, l))", "def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)", "def main():\n args = parse_args()\n process_args(args)", "def pipeline_runner():\n # file_parser() # take raw data file and extract columns of interest. 
remove contaminants.\n entry_parser() # remove duplicates, faulty lines and format the whole thing normally.\n lfq_parser() # replace 0s in lfq reading with random small numbers for t testing purposes\n # open Rstudio and do T testing there\n ROutputFormatter() # reformat R output to something more appealing, add FDR and fold change values", "def main():\n usage = \"usage: %prog [options] input domain ... domain2\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) < 2:\n parser.print_help()\n return 2\n\n # do stuff\n wl = makewhitelist(args[0])\n for domain in args[1:]:\n print('%s is %s' % (domain, 'whitelisted' if whitelisted(domain) else 'not whitelisted'))", "def main():\n widget = ParseGrypeJSON()\n logging.debug(f'argv {\",\".join(sys.argv)}')\n\n if len(sys.argv) > 1:\n widget.filename(sys.argv[1])\n\n sys.exit(widget.report())", "def main():\n program_name = os.path.basename(sys.argv[0])\n\n _initialize_debugging(program_name)\n _handle_signals()\n _process_environment_variables()\n arguments = _process_command_line()\n\n exit_status = 1 # no match\n if arguments:\n for filename in arguments:\n if os.path.isfile(filename):\n if not parameters[\"No formatting\"]:\n print(filename + \":\")\n for _, printable_string in strings.strings(filename):\n if what_in_string(printable_string):\n exit_status = 0 # match found\n if parameters[\"First match only\"]:\n break\n else:\n logging.error('\"%s\": No such file or directory', filename)\n elif parameters[\"Stdin unused\"]:\n logging.critical(\"At least one filename expected\")\n else:\n for _, printable_string in strings.strings():\n if what_in_string(printable_string):\n exit_status = 0 # match found\n if parameters[\"First match only\"]:\n break\n\n sys.exit(exit_status)", "def run_fenrichment(args):\n filter_enrichment(args)", "def main():\n\t\n\tglobal debug\n\tct = 0\n\tfor opt in sys.argv[1:]:\n\t\tif opt[0] != \"-\": break\n\t\tct = ct + 1\n\t\tif opt == \"-d\": debug = True\n\tif len(sys.argv) < 2+ct:\n\t\tprint (\"Usage: %s filename\" % sys.argv[0])\n\t\treturn\n\tparse(\"\".join(mklines(sys.argv[1+ct])))\n\treturn", "def main():\n if len(sys.argv) > 1:\n CFS3Site().help()\n else:\n print(sceptre_handler(dict()))", "def get_filters():\n \n \"\"\"\"\"\"\"\"\n \n \"\"\"Messeges to genrate filters\"\"\"\n\tnote_messege = 'In this project, we make use of Python to explore data related to bike share systems for three major cities in the United States\\n'\n welcome_messege = 'Hello! Let\\'s explore some US bikeshare data!\\n'\n enter_city_name_messege = 'Which city would you like to filter by? Chicago, New York City or Washington? '\n filter_definition_messege = '\\nWould you like to filter the data by - \\n1. Month\\n2. Day\\n3. Both\\n4. No Filter\\n\\nPlease choose the appropriate filter name.\\nNote: Incorrect filter name will result as \\'no filter selected\\' by the user.\\n'\n enter_filter_messege = 'Desired filter (e.g: Month, Day, Both or No Filter): '\n enter_month_name_messege = 'Enter month name (e.g: january, february, march, april, may or june): '\n enter_day_name_messege = 'Enter day of the week (e.g: monday, tuesday, wednesday, thursday, friday, saturday, sunday): '\n exception_messege = '\\nWarning! That is not a valid input.\\n'\n warning_city_name_messege = '\\nWarning! Invalid city name. Select city name from the following cities only - Chicago, New York City or Washington.' \n warning_month_name_messege = '\\nWarning! Invalid month name. 
Select month name from the following months only - january, february, march, april, may or june'\n warning_day_name_messege = '\\nWarning! Invalid day name. Select day name from the following days only - monday, tuesday, wednesday, thursday, friday, saturday, sunday'\n \"\"\"\"\"\"\"\"\n \n \"\"\"City, Month and Day List\"\"\"\n city_list = ['chicago', 'new york city', 'washington']\n month_list = ['january', 'february', 'march', 'april', 'may', 'june']\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n \"\"\"\"\"\"\"\"\n \n\tprint(note_messege)\n print(welcome_messege)\n \n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs \n while True:\n try:\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n while city.lower() not in city_list:\n while True:\n try: \n print(warning_city_name_messege)\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n print(filter_definition_messege)\n while True:\n try:\n filter_choice = input(enter_filter_messege)\n break\n except:\n print(exception_messege)\n while True: \n if filter_choice.lower() == 'month':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n day = 'all'\n break\n \n elif filter_choice.lower() == 'day':\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) \n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n month = 'all'\n break\n \n elif filter_choice.lower() == 'both':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege)\n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n break\n \n else:\n month = 'all'\n day = 'all'\n break\n \n\n print('-'*40)\n return city.lower(), month.lower(), day.lower()", "def main():\n amplitute_variation = [0.98, 1.02]\n frequency_variation = [0, 0.06]\n transition_band = [(0.1*math.pi), (0.4*math.pi)]\n (passband, stopband, transition_band_diff) = set_diffs(\n amplitute_variation, frequency_variation, transition_band)\n omega_c = np.mean(transition_band)\n dB = to_dB(stopband)\n windowing_type = choose_window_type(dB)\n M = get_magnetude(transition_band_diff, windowing_type)\n result_filter = create_filter(M, omega_c, windowing_type)\n print('Filter: {0}\\nNormalized_filter: {1}'.format(\n result_filter, normalize(result_filter)))", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def main(args):\n parser = create_parser()\n\n if not args:\n parser.print_usage()\n sys.exit(1)\n\n parsed_args = parser.parse_args(args)\n\n urls = scrape_urls(parsed_args.webpage)\n emails = scrape_emails(parsed_args.webpage)\n phones = scrape_phones(parsed_args.webpage)\n\n if urls:\n print(\"\\nURLS:\\n\\n\", '\\n'.join(urls))\n else:\n print(\"\\nURLS:\\n\\nNone\")\n\n if emails:\n print(\"\\nEMAILS:\\n\\n\", '\\n'.join(emails))\n else:\n print(\"\\nEMAILS:\\n\\nNone\")\n\n if phones:\n print(\"\\nPHONE NUMBERS:\\n\\n\", '\\n'.join(phones))\n else:\n print(\"\\nPHONE NUMBERS:\\n\\nNone\")", "def main(args):\n p = OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='debug')\n p.add_option('-w', '--w3c',\n action='store_true', default=False, dest='w3c',\n help='send file to validator.w3.org')\n p.add_option('-r', '--rm',\n action='store_true', default=False, dest='passrm',\n help='rm validation output on pass')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='more output')\n (o, a) = p.parse_args(args)\n \n if o.debug: pdb.set_trace()\n\n verbose(o.verbose)\n \n if 1 < len(a):\n flist = a[1:]\n else:\n flist = glob.glob(\"*.html\")\n\n for filename in flist:\n if verbose(): print filename\n if o.w3c:\n w3c_validate(filename)\n else:\n check_file(filename)\n\n sys.exit(exit_value())", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n 
f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def main():\n arg_parser = argparse.ArgumentParser(description=\"\"\"\n This utility will take a SAM alignment file from paired end reads \n and filter the original read FASTQ files do those reads without\n high-likelihood alignments to human.\n For gzipped alignments, consider using pipes: \n gunzip -c ref.fna.gz | strip_mt_ebv.py | gzip > ref.nomtebv.fna.gz\n \"\"\")\n\n arg_parser.add_argument(\n '--alnfile', '-A',\n type=argparse.FileType('r'),\n help='Alignment File. Can be stdin. For gzip, consider pipes',\n default=sys.stdin\n )\n arg_parser.add_argument(\n '--r1in', '-1',\n required=True,\n help='Input fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2in', '-2',\n required=True,\n help='Input fastq file for R2'\n )\n arg_parser.add_argument(\n '--r1out', '-o1',\n required=True,\n help='Output fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2out', '-o2',\n required=True,\n help='Output fastq file for R2'\n )\n arg_parser.add_argument(\n '--mapq',\n default=30,\n type=int,\n help='Minimum mapq required to be considered a valid read'\n )\n arg_parser.add_argument(\n '--cov_min',\n type=float,\n default=0.9\n )\n\n args = arg_parser.parse_args()\n\n passed_ids = get_passing_ids(\n args.alnfile,\n args.mapq,\n args.cov_min,\n )\n\n filter_fastq(\n passed_ids,\n args.r1in,\n args.r2in,\n args.r1out,\n args.r2out\n )", "def filterRansac():\n pass", "def filt(rec):\n return True # Show everything", "def main(argv):\n ret = 0\n parser = utils.parser.createParser()\n\n opt = parser.parse_args(argv)\n\n try:\n # Stop execution if help flag is on\n if opt.help:\n raise ShowHelpException()\n\n # Read materials we need for later processing\n cfg = auxiliary.readConfig(opt.config)\n rawJson = auxiliary.readJson(opt.input[0])\n\n # List we are going to manipulate :)\n commands = rawJson['commands']\n\n # Pump commands and instruction for filtering, sorting etc\n rawJson['commands'] = pump(commands, cfg)\n\n # Write final result\n auxiliary.writeJSONFile(rawJson, opt.output[0])\n\n except ShowHelpException:\n parser.print_help()\n ret = 0\n except FilterException, e:\n ret = 1\n print('ERROR')\n print(e)\n\n return ret", "def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)", "def filter(self, filters):", "def main(args):\n # Results: print to console and also write to output file\n pass", "def main():\n try:\n (whitelist, strict, lists, addressbooks, folders, exclusions) = parseOptions()\n except Exception, e:\n usage(e)\n sys.exit(1)\n\n try:\n addresses = set()\n for i in lists:\n result = parseHandList(i)\n addresses |= result\n for i in addressbooks:\n result = parseMuttAddressbook(i)\n addresses |= result\n for i in folders:\n 
result = parseMboxFolder(i)\n addresses |= result\n for i in exclusions:\n result = parseHandList(i)\n addresses -= result\n if strict:\n open(whitelist, \"w\").writelines([\"^\"+key.replace('.', '\\\\.')+'$\\n' for key in sorted(addresses)])\n else:\n open(whitelist, \"w\").writelines([key+'\\n' for key in sorted(addresses)])\n except Exception, e:\n print \"Error generating whitelist: %s\" % e\n sys.exit(2)", "def run(argv=sys.argv[1:]):\n clparser = argparse.ArgumentParser(description='Determine whether there' +\n ' are traces of helium in a given spectrum.')\n clparser.add_argument('-v', '--version', action='version',\n version='%(prog)s ' + __version__)\n clparser.add_argument('-a', '--plot-all', action='store_true',\n help='draw plot showing all the lines found in spectrum')\n clparser.add_argument('-p', '--plot', action='store_true',\n help='draw plot showing helium lines in spectrum')\n clparser.add_argument('filenames', nargs='+',\n help='spectrum files to process')\n clparser.add_argument('--verbose', action='store_true',\n help='verbose output (prints lines and signal to noise ratio)')\n clparser.add_argument('-t', '--threshold', nargs='?', type=float,\n const=1.0, default=1.0,\n help='a signal raises that many times above the background noise')\n args = clparser.parse_args(argv)\n\n for fname in args.filenames:\n find_helium(fname, plot=args.plot, plot_all=args.plot_all,\n verbose=args.verbose, threshold=args.threshold)", "def main(args):", "def main(args):", "def testUsingFilterTool(self):\n pass", "def __main__(*args):\n config = parse_args(args)\n validate_config(config)\n apply_registration_settings(config)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n text_OK = False\n city_conf_text = 'Chosen city is {}? Enter \"y\" to confirm. '\n while True:\n city = input('\\nFor which city would you like to run an evaluation? Chicago (CH), New York City (NY) or Washington (WA)? \\n\\\nPlease Enter abbreviation. ')\n if city[0:2].lower() == 'ch':\n city = 'chicago'\n text_OK = True\n elif city[0:2].lower() == 'ny':\n city = 'new york city'\n text_OK = True\n elif city[0:2].lower() == 'wa':\n city = 'washington'\n text_OK = True\n else:\n print('\\nInput for city selection could not be evaluated. Please enter valid abbreviation and check for typing errors. ')\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n if text_OK == True:\n conf_input = input(city_conf_text.format(city.title()))\n if conf_input.lower() == 'y':\n break\n else:\n text_OK = False\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n\n # get user input for month (all, january, february, ... , june)\n text_OK = False\n month_conf_text = 'Chosen month(s) is {}? Enter \"y\" to confirm. '\n while True:\n month = input('\\nFor which month(s) would you like to run an evaluation?\\n\\\nThe following months are available: January (Jan), February (Feb), March (Mar), April (Apr), May (May), June (Jun) or all (all). \\n\\\nPlease enter abbreviation. 
')\n # months list transferred to main program head in order to make it available for call of other functions\n for i in range(len(months)):\n if month[0:3].lower() == months[i][0:3]:\n month_name = months[i].title()\n month = i+1\n text_OK = True\n break\n if text_OK == True:\n conf_input = input(month_conf_text.format(month_name.title()))\n if conf_input.lower() == 'y':\n break\n else:\n text_OK = False\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n else:\n print('\\nInput for month selection could not be evaluated. Please check for typing errors. ')\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n text_OK = False\n day_conf_text = 'Chosen day(s) of week is {}? Enter \"y\" to confirm. '\n day_except_text = 'Please enter an integer number. '\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n while True:\n day = input('\\nFor which day(s) would you like to run an evaluation?\\n\\\nPlease enter specific day as number (Monday = 1 ... Sunday = 7) or enter \"8\" to select all. ')\n try:\n for i in range(len(days)):\n if (int(day)-1) == i:\n week_day = days[int(day)-1].title()\n text_OK = True\n break\n if text_OK == True:\n conf_input = input(day_conf_text.format(week_day.title()))\n if conf_input.lower() == 'y':\n if day == '5':\n print(\"Thank god it's friday !\")\n time.sleep(2)\n break\n else:\n text_OK = False\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n else:\n print('\\nInput for day selection could not be evaluated. Please check for typing errors.')\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n except Exception as e:\n print('Exception occurred: {}'.format(e))\n print(day_except_text)\n conf_request = input(cancel_text)\n if conf_request.lower() == 'n':\n return\n print('-'*40)\n return city, month, week_day", "def do_extract(self, args):\n args = args.split()\n if len(args) != 1:\n print 'usage: scan pat'\n return\n pat = args[0]\n self.regexprutils.extract(pat)", "def main(argv):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser = parse.parse_agglo_from_labelmask(parser)\n parser = parse.parse_common(parser)\n args = parser.parse_args()\n\n agglo_from_labelmask(\n args.inpufile,\n args.labelvolume,\n args.ratio_threshold,\n args.outputfile,\n args.save_steps,\n args.protective,\n )", "def office_prefilter_data(parser, args, params):\n local_args = parser.parse_known_args(args)\n \n control.prefilter_data(params)", "def main():\n processor.custom_config = parse_arguments()\n processor.process()\n logger.info(processor.statistics)\n logger.info(processor.custom_config)", "def main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Print the unique terms\n across Tweet messages in the db. Leave\n arguments unset to show all data.\"\"\"\n )\n parser.add_argument(\n \"-s\",\n \"--search\",\n metavar=\"TEXT\",\n help=\"\"\"Filter the Tweet records to those which contain the input\n TEXT anywhere in their message text, ignoring case. Enclose the\n argument in single quotes to escape a hashtag or to include\n spaces.\"\"\",\n )\n parser.add_argument(\n \"-f\",\n \"--filter\",\n action=\"store_true\",\n help=\"\"\"If flag is supplied, filter the unique terms in the *output*\n list to only those which contain the input term (requires TEXT to\n be set). 
This will tend to provide much shorter lists, but is\n useful for identifying hashtags or handles which are similar\n because they share a common string. When using --filter, it is\n recommended to keep TEXT input short and general (excluding\n @ or # sign) in order to provide the broadest range of related\n results.\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n type=int,\n default=0,\n help=\"\"\"Max count of tweets to select, selected from tweets order\n by most recent post time first. The terms will be derived from\n this sample of tweets. Omit argument or set to 0 to use all tweets\n in the db.\"\"\",\n )\n\n args = parser.parse_args()\n\n printHashtagsAndMentions(\n searchText=args.search, filterTerms=args.filter, tweetLimit=args.limit\n )", "def main():\n\n try:\n people = Parser.read_file(sys.argv[1])\n print(\"\\nResult:\")\n for email, person in people.items():\n print(\"{}: {}\".format(email, person))\n except RuntimeError as error:\n print(error)\n exit(1)", "def filter(self, *args, **kwargs):", "def parse_args(arglist):\n help = dedent(\"\"\"\n Run FIR model on subject data\n \"\"\")\n parser = tools.parser\n parser.description = help\n parser.formatter_class = argparse.RawDescriptionHelpFormatter\n parser.add_argument(\"-extract_info\", help=\"info for experiment to extract\")\n parser.add_argument(\"-mask_type\", help=\"mask or func?\")\n parser.add_argument(\"-mask_name\", help=\"name of mask in sub's mask directory\")\n return parser.parse_args(arglist)", "def pipe(*args, **kwargs):\n return parser(*args, **kwargs)", "def handle_filter(packets, arg, arguments):\r\n matched_packets = []\r\n if arg == \"host\":\r\n if len(arguments) == 0:\r\n print(\"A host IP address should be followed by the host command.\")\r\n sys.exit()\r\n else:\r\n # ip address here\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n dest_ip = pkt[1][10]\r\n src_ip = pkt[1][9]\r\n if arg == dest_ip or arg == src_ip:\r\n matched_packets.append(pkt)\r\n elif arg == \"ip\":\r\n for pkt in packets:\r\n if str(pkt[0][3]) == \"0800\":\r\n matched_packets.append(pkt)\r\n elif arg == \"port\":\r\n if len(arguments) == 0:\r\n print(\"\\\"port\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n # port number\r\n arg = arguments.popleft()\r\n\r\n for pkt in packets:\r\n if pkt[1][7] == 6 or pkt[1][7] == 17:\r\n if str(pkt[2][0]) == arg or str(pkt[2][1]) == arg:\r\n matched_packets.append(pkt)\r\n\r\n elif arg == \"tcp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 6:\r\n matched_packets.append(pkt)\r\n elif arg == \"udp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 17:\r\n matched_packets.append(pkt)\r\n elif arg == \"icmp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 1:\r\n matched_packets.append(pkt)\r\n elif arg == \"net\":\r\n if len(arguments) == 0:\r\n print(\"\\\"net net\\\" is required. \")\r\n sys.exit()\r\n else:\r\n # ip prefix\r\n arg = arguments.popleft()\r\n if len(arg.split(\".\")) != 4:\r\n print(\"Please enter a valid ip address format. 
(x.x.x.x)\")\r\n sys.exit()\r\n prefix_length = 0\r\n length = len(arg)\r\n if arg == \"0.0.0.0\":\r\n prefix_length = 0\r\n elif arg[length - 6:length] == \".0.0.0\":\r\n prefix_length = length - 6\r\n elif arg[length - 4:length] == \".0.0\":\r\n prefix_length = length - 4\r\n elif arg[length - 2:length] == \".0\":\r\n prefix_length = length - 2\r\n else:\r\n prefix_length = length\r\n\r\n for pkt in packets:\r\n if pkt[1][9][0:prefix_length] == arg[0:prefix_length] or pkt[1][10][0:prefix_length] == \\\r\n arg[0:prefix_length]:\r\n matched_packets.append(pkt)\r\n\r\n elif arg == \"not\":\r\n if len(arguments) == 0:\r\n print(\"\\\"not\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n arg = arguments.popleft()\r\n if arg == \"host\":\r\n if len(arguments) == 0:\r\n print(\"A host IP address should be followed by the host command.\")\r\n sys.exit()\r\n else:\r\n # ip address here\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n dest_ip = pkt[1][10]\r\n src_ip = pkt[1][9]\r\n if arg != dest_ip and arg != src_ip:\r\n matched_packets.append(pkt)\r\n elif arg == \"ip\":\r\n for pkt in packets:\r\n if str(pkt[0][3]) != \"0800\":\r\n matched_packets.append(pkt)\r\n elif arg == \"port\":\r\n if len(arguments) == 0:\r\n print(\"\\\"port\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n # port number\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n if pkt[1][7] == 6 or pkt[1][7] == 17:\r\n if str(pkt[2][0]) != arg and str(pkt[2][1]) != arg:\r\n matched_packets.append(pkt)\r\n elif arg == \"tcp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 6:\r\n matched_packets.append(pkt)\r\n elif arg == \"udp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 17:\r\n matched_packets.append(pkt)\r\n elif arg == \"icmp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 1:\r\n matched_packets.append(pkt)\r\n elif arg == \"net\":\r\n if len(arguments) == 0:\r\n print(\"\\\"net net\\\" is required. \")\r\n sys.exit()\r\n else:\r\n # ip prefix\r\n arg = arguments.popleft()\r\n if len(arg.split(\".\")) != 4:\r\n print(\"Please enter a valid ip address format. 
(x.x.x.x)\")\r\n sys.exit()\r\n prefix_length = 0\r\n\r\n length = len(arg)\r\n if arg == \"0.0.0.0\":\r\n prefix_length = 0\r\n\r\n elif arg[length - 6:length] == \".0.0.0\":\r\n\r\n prefix_length = length - 6\r\n elif arg[length - 4:length] == \".0.0\":\r\n prefix_length = length - 4\r\n elif arg[length - 2:length] == \".0\":\r\n prefix_length = length - 2\r\n else:\r\n prefix_length = length\r\n for pkt in packets:\r\n if pkt[1][9][0:prefix_length] != arg[0:prefix_length] and pkt[1][10][0:prefix_length] != \\\r\n arg[0:prefix_length]:\r\n matched_packets.append(pkt)\r\n\r\n return matched_packets, arg", "def main():\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()", "def main(\n args: Sequence[str] = sys.argv[1:],\n) -> None:\n options = argument_parser().parse_args(args)\n\n valid_passports = 0\n\n for record in parse_passport_records(options.source):\n if options.verbose:\n print(record, file=sys.stderr)\n try:\n validate_passport(record, permissive=options.permissive)\n except ValueError as exc:\n if options.verbose:\n print(str(exc), file=sys.stderr)\n else:\n valid_passports += 1\n\n print(valid_passports)", "def action(arguments):\n if arguments.quality_window_mean_qual and not arguments.quality_window:\n raise ValueError(\"--quality-window-mean-qual specified without \"\n \"--quality-window\")\n\n if trie is None or triefind is None:\n raise ValueError('Missing Bio.trie and/or Bio.triefind modules. 
Cannot continue')\n\n # Always filter with a quality score\n qfilter = QualityScoreFilter(arguments.min_mean_quality)\n filters = [qfilter]\n\n output_type = fileformat.from_handle(arguments.output_file)\n with arguments.input_fastq as fp:\n if arguments.input_qual:\n sequences = QualityIO.PairedFastaQualIterator(fp,\n arguments.input_qual)\n else:\n sequences = SeqIO.parse(fp, 'fastq')\n\n listener = RecordEventListener()\n if arguments.details_out:\n rh = RecordReportHandler(arguments.details_out, arguments.argv,\n arguments.details_comment)\n rh.register_with(listener)\n\n # Track read sequences\n sequences = listener.iterable_hook('read', sequences)\n\n # Add filters\n if arguments.max_length:\n max_length_filter = MaxLengthFilter(arguments.max_length)\n filters.append(max_length_filter)\n if arguments.min_length:\n min_length_filter = MinLengthFilter(arguments.min_length)\n filters.append(min_length_filter)\n if arguments.max_ambiguous is not None:\n max_ambig_filter = MaxAmbiguousFilter(arguments.max_ambiguous)\n filters.append(max_ambig_filter)\n if arguments.ambiguous_action:\n ambiguous_filter = AmbiguousBaseFilter(\n arguments.ambiguous_action)\n filters.append(ambiguous_filter)\n if arguments.quality_window:\n min_qual = arguments.quality_window_mean_qual or \\\n arguments.min_mean_quality\n window_filter = WindowQualityScoreFilter(arguments.quality_window,\n min_qual)\n filters.insert(0, window_filter)\n\n if arguments.barcode_file:\n with arguments.barcode_file:\n tr = parse_barcode_file(arguments.barcode_file,\n arguments.primer, arguments.barcode_header)\n f = PrimerBarcodeFilter(tr)\n filters.append(f)\n\n if arguments.map_out:\n barcode_writer = csv.writer(arguments.map_out,\n quoting=getattr(csv, arguments.quoting),\n lineterminator='\\n')\n def barcode_handler(record, sample, barcode=None):\n barcode_writer.writerow((record.id, sample))\n listener.register_handler('found_barcode', barcode_handler)\n for f in filters:\n f.listener = listener\n sequences = f.filter_records(sequences)\n\n # Track sequences which passed all filters\n sequences = listener.iterable_hook('write', sequences)\n\n with arguments.output_file:\n SeqIO.write(sequences, arguments.output_file, output_type)\n\n rpt_rows = (f.report_dict() for f in filters)\n\n # Write report\n with arguments.report_out as fp:\n writer = csv.DictWriter(fp, BaseFilter.report_fields,\n lineterminator='\\n', delimiter='\\t')\n writer.writeheader()\n writer.writerows(rpt_rows)", "def main():\n # Initialize\n args = get_args()\n profiles = get_profiles(args)\n running_filter = [{'Name': 'instance-state-name', 'Values': ['running']}]\n\n # Sort through servers for a matching name\n matching_list = []\n for profile in profiles:\n filtered_instances = get_ec2_reservations(profile, running_filter)\n for reservation in filtered_instances:\n # Filter for instances with a 'Name' tag that matches filter_string\n instances = [\n instance for instance in reservation['Instances']\n if instance.get('Tags') and [\n tag for tag in instance['Tags']\n if tag['Key'] == 'Name' and args.filter_string in tag['Value']\n ]\n ]\n # Add matching instances to matching_list\n for instance in instances:\n matching_list.append({\n 'Name': [tag['Value'] for tag in instance['Tags'] if tag['Key'] == 'Name'][0],\n 'InstanceId': instance['InstanceId'],\n 'PublicDnsName': instance['PublicDnsName'] if instance.get('PublicDnsName')\n else 'No Public DNS',\n 'PrivateIpAddress': instance['PrivateIpAddress']\n if instance.get('PrivateIpAddress')\n else 'No Private 
IP'\n })\n\n # If flag for full run not added, exit one there instances are found\n if matching_list and not args.no_early_exit:\n stop_and_tabulate(matching_list)\n\n # Tabulate output once done\n stop_and_tabulate(matching_list)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"Fasta rDNA input\")\n parser.add_argument(\"output\", help=\"GFF annotation\")\n parser.add_argument(\"kingdom\", help=\"Choose kingdom\")\n args = parser.parse_args()\n command(args)", "def usage(print_code_name=True):\n\tprint(\"*********************************************************************\")\n\tprint(\"* Scanner and Flooder Tool *\")\n\tprint(\"*********************************************************************\")\n\tprint()\n\tprint(\"ex, scan usage: scanner.py -s <target_host> <start_port> <end_port>\")\n\tprint(\"-h, -help\t- print out the description of usage\")\n\tprint(\"-s\t - scan a target host and a range of ports\\n\"\n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-l - list the sets of ports found open for all hosts scanned\")\n\tprint(\"-pf - flood a target host with an ICMP PING flood.\\n\" \n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-syn - flood a target host with an SYN ACK flood.\\n\"\n\t\t \" Requires two arguments: <host>, <ports> in format of 'p1,p2,p3,...,pn'. Has optional third argument, <amount> \")\n\tprint(\"-udp - DDOS a target host with UPD Packets.\\n\"\n\t\t \" Requires 3 arguments: <host>, <port>, <amount> (default =1)\")\n\tprint(\"-a - save hosts and open ports to a .txt file\")\n\tprint(\"-r - read in hosts and open ports from a .txt file\")\n\tprint()\n\tprint()\n\tprint(\"Examples: \")\n\tprint(\"-l\")\n\tprint(\"-s 192.168.0.1 0 500 # host, port range (space delimited)\")\n\tprint(\"-pf 192.168.0.1 100 # host, num of pings (optional, defaults to 1)\")\n\tprint(\"-syn 192.168.0.1 80,8080 100 # host, ports (comma delimited), and amount (optional)\")\n\tprint(\"-udp 192.168.0.1 80 100 # host, port, amount (optional, defaults to 1)\")", "def parse_arguments(args):", "def main(args=None):", "def main(args=None):", "def get_filters():\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # ADD : available analysis parameters\n cities_list=['chicago','new york city','washington']\n months_list=['all','january','february','march','april','may','june']\n days_list=['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city=''\n while city not in cities_list:\n city=str(input(\"Enter the name of the city to analyze: \")).lower()\n if city not in cities_list:\n print(\"!Warning : cities available for analysis : {}\".format(cities_list))\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month=''\n while month not in months_list:\n month=str(input(\"Enter the month to analyze (enter 'all' if you want all the months): \")).lower()\n if month not in months_list:\n print(\"!Warning : months available for analysis : {}\".format(months_list))\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n day=''\n while day not in days_list:\n day=str(input(\"Enter the day to analyze (enter 'all' if you want all the days): \")).lower()\n if day not in days_list:\n print(\"!Warning : days available for analysis : {}\".format(days_list))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #Invalid input is administered to by using a while loop.\n while True:\n city=input(\"Choose a city name between Chicago, New York City or Washington:!\").lower()\n if city not in CITY_DATA:\n print(\"\\n Not a valid city\\n\")\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try\n month=str(input('Enter name of one month(from January to June) to filter by or \"all\" ,for no filter :')).lower()\n months=['january', 'february', 'march', 'april', 'may', 'june']\n if month == 'january':\n month = months[0]\n elif month == 'february':\n month = months[1]\n elif month == 'march':\n month = months[2]\n elif month == 'april':\n month = months[3]\n elif month == 'may':\n month = months[4]\n elif month == 'june':\n month = months[5]\n elif month == 'all':\n print('all')\n else:\n raise(Exception)\n\t\t\texcept Exception as error:\n print('Invalid Input!,please restart again!.')", "def test(ctx, filter=\"*\", verbose=False):\n test_python(ctx, filter, verbose)", "def main():\n\t\n\tfilename = optParse()\n\t\n\ttry:\n\t\tinput = loadFile(filename)\n\texcept IOError, (errno, msg):\n\t\tprint >>sys.stderr, msg\n\t\tsys.exit(-1)\n\t\n\toutput = parse(input)\n\tprint output", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_dir\", required=True, help=\"Directory containing original data set in requisite folder structure (small part or all data)\")\n parser.add_argument(\"-features_filename\", required=True, help=\"Features cloudpickle file that provides that pruning information\")\n parser.add_argument(\"-start_seed\", type=int, default=1284171779)\n parser.add_argument(\"-num_datasets\", type=int, default=20)\n parser.add_argument(\"-modes\", choices=[PREPROCESS, TRAIN, EVALUATE], nargs=\"+\", required=True)\n args = parser.parse_args()\n return pipeline(args)", "async def parse_input_args_filters(ctx, commands, args) -> (discord.Member, bool, str, list, list, list):\n user = None\n has_all = False\n group_by_key = 'set_code'\n affiliation_names = []\n rarity_codes = []\n card_codes = []\n\n # Parse all the arguments\n for arg in args:\n # Check if the argument is a user\n try:\n converter = commands.MemberConverter()\n user = await converter.convert(ctx=ctx, argument=arg)\n # Check if the argument is an affiliation\n except commands.errors.MemberNotFound:\n argLowerCase = arg.lower()\n if argLowerCase == 'all':\n has_all = True\n elif argLowerCase in ['a', 'affiliation', 'affiliations']:\n group_by_key = 'affiliation_name'\n elif argLowerCase in ['f', 'faction', 'factions']:\n group_by_key = 'faction_name'\n elif argLowerCase in ['rar', 'rarity']:\n group_by_key = 'rarity_code'\n elif argLowerCase in ['nogroup', 'nogroups']:\n group_by_key = ''\n elif argLowerCase in ['v', 'villain', 'villains']:\n affiliation_names.append('Villain')\n elif argLowerCase in ['h', 'hero', 'heroes']:\n affiliation_names.append('Hero')\n elif argLowerCase in ['n', 'neutral', 'neutrals']:\n affiliation_names.append('Neutral')\n elif 
argLowerCase in ['s', 'starter', 'starters']:\n rarity_codes.append('S')\n elif argLowerCase in ['c', 'common']:\n rarity_codes.append('C')\n elif argLowerCase in ['u', 'uncommon']:\n rarity_codes.append('U')\n elif argLowerCase in ['r', 'rare']:\n rarity_codes.append('R')\n elif argLowerCase in ['l', 'legendary']:\n rarity_codes.append('L')\n elif is_valid_card_number_format(arg):\n card_codes.append(arg)\n else:\n raise ValueError('Invalid argument: {}'.format(arg))\n\n if card_codes and (has_all or affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. You can\\'t mix card numbers and batch.')\n elif has_all and (affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. Use either \\\"all\\\" or affiliation/rarity name but not both.')\n\n return user, has_all, group_by_key, affiliation_names, rarity_codes, card_codes", "def AddFilterOptions(self, argument_group):\n names = [u'date_filters', u'filter_file']\n helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_group, names=names)\n\n argument_group.add_argument(\n u'-x', u'--extensions', dest=u'extensions_string', action=u'store',\n type=str, metavar=u'EXTENSIONS', help=(\n u'Filter on file name extensions. This option accepts multiple '\n u'multiple comma separated values e.g. \"csv,docx,pst\".'))\n\n argument_group.add_argument(\n u'--names', dest=u'names_string', action=u'store',\n type=str, metavar=u'NAMES', help=(\n u'Filter on file names. This option accepts a comma separated '\n u'string denoting all file names, e.g. -x '\n u'\"NTUSER.DAT,UsrClass.dat\".'))\n\n argument_group.add_argument(\n u'--signatures', dest=u'signature_identifiers', action=u'store',\n type=str, metavar=u'IDENTIFIERS', help=(\n u'Filter on file format signature identifiers. This option '\n u'accepts multiple comma separated values e.g. \"esedb,lnk\". 
'\n u'Use \"list\" to show an overview of the supported file format '\n u'signatures.'))", "def main():\n parse_file(sys.argv[1])", "def main():\n options = get_options()\n data_path = str(options.data_dir)\n bloom = BloomCategory(malicious_path=data_path + '/malicious-ips.bloom',\n predicted_path=data_path + '/predicted-ips.bloom',\n has_intel_path=data_path + '/ip-threat-intel.bloom')\n with FileInput(sys.stdin) as file_handle:\n for line in file_handle:\n ip_address = line.strip()\n if bloom.check_ip(ip_address,\n check_malicious=options.malicious,\n check_predicted=options.predicted,\n check_suspicious=options.suspicious):\n print(ip_address)", "def main():\n parser = ArgumentParser(\n description='Category Filter: Filter a List of Categories from a JSON')\n parser.add_argument('json_file_path', help='JSON file path')\n parser.add_argument('out_file', help='Output filename')\n args = parser.parse_args()\n\n ann_file = open(args.json_file_path)\n category_names = [\"sports ball\", \"cell phone\", \"couch\", \"elephant\", \"tie\", \"spoon\", \"skis\", \"apple\", \"giraffe\", \"laptop\", \"tennis racket\", \"sink\", \"dog\", \"fork\", \"cat\", \"teddy bear\", \"train\", \"skateboard\", \"toilet\", \"sandwich\", \"bed\", \"keyboard\", \"baseball glove\", \"baseball bat\", \"airplane\", \"oven\", \"hot dog\", \"refrigerator\", \"frisbee\", \"mouse\", \"fire hydrant\", \"stop sign\", \"bear\", \"snowboard\", \"parking meter\", \"toothbrush\", \"microwave\", \"scissors\", \"hair drier\", \"toaster\"]\n\n json_coco = json.load(ann_file)\n new_json = deepcopy(json_coco)\n\n for ann in json_coco['annotations']:\n if return_cat_name(json_coco, ann['category_id']) in category_names:\n new_json['annotations'].remove(ann)\n\n for cat in json_coco['categories']:\n if cat['name'] in category_names:\n new_json['categories'].remove(cat)\n\n output = open(args.out_file, \"w\")\n json.dump(new_json, output)\n output.close()", "def _makeParser_search() :\n parser = argparse.ArgumentParser(\n description = SCRIPT_DESCRIPTION_SEARCH)\n parser.add_argument(\"-c\", \"--count\", action = \"store_true\",\n help = \"Just return the number of records, no fetch\")\n # Required named arguments (http://stackoverflow.com/questions/24180527/argparse-required-arguments-listed-under-optional-arguments)\n required = parser.add_argument_group(\"required named arguments\")\n # --email\n required.add_argument(\"-e\", \"--email\", type = str,\n help = \"User's email (required by Entrez)\")\n # --listId\n required.add_argument(\"-l\", \"--listId\", type = str,\n help = \"File containing one GenBank identifier per \"\n \"line. Use - for reading from stdin. \"\n \"Exactly one of --listId or \"\n \"--query must be specified, but not both.\")\n # --query\n required.add_argument(\"-q\", \"--query\", type = str,\n help = \"Query string for GenBank search. \"\n \"Exactly one of --listId or \"\n \"--query must be specified, but not both.\",\n metavar = \"SEARCH_TERM\")\n # Download options\n download = parser.add_argument_group(\"download-related options\")\n # --retmax\n download.add_argument(\"-r\", \"--retmax\", type = int, default = 0,\n help = \"Maximum number of entries to retrieve from \"\n \"GenBank, comprised between 1 and 10000. Use 0 for \"\n \"unlimited number of returned entries. 
(default: 0)\")\n # --download\n download.add_argument(\"-d\", \"--download\", action = \"store_true\",\n help = \"Download the full GenBank records\")\n # --forceDownload\n download.add_argument(\"-f\", \"--forceDownload\", action = \"store_true\",\n help = \"Download record even if file already exists \"\n \"(implies --download)\")\n # --fullWGS\n download.add_argument(\"--fullWGS\", action = \"store_true\",\n help = \"Also download full WGS sequence data when \"\n \"WGS trace reference is present in a GenBank record \"\n \"(only works if the original GenBank record is to be \"\n \"downloaded too or if --forceDownload is used)\")\n # --outputDir\n download.add_argument(\"-o\", \"--outputDir\", type = str, default = \".\",\n help = \"Destination folder for downloaded records \"\n \"(default: current directory)\")\n # --batchSize\n download.add_argument(\"-b\", \"--batchSize\", type = int, default = 5,\n help = \"Batch size for full record retrieval \"\n \"(default: 5)\")\n # --delay\n download.add_argument(\"--delay\", type = int, default = 15,\n help = \"Delay in seconds between successive batch \"\n \"retrieval of the full records (default: 15)\")\n return parser", "def cli(argv):\r\n args = get_args(argv)\r\n verbosity = \"summary\"\r\n if args.verbose:\r\n verbosity = \"report\"\r\n report = evaluate(args.design, verbosity)\r\n print json.dumps(report, indent=4)", "def parseArgs( argv ):\r\n\r\n parser = OptionParser()\r\n parser.add_option(\"-o\", \"--order\", type=\"choice\", action=\"store\", choices=[\"p\",\"h\",\"g\",\"c\"], default=\"p\", dest=\"orderType\",\r\n help=\"specify a display sorted by (p)lugin, (c)ve id, (h)ost, or just (g)enerate a hostfile\")\r\n parser.add_option(\"-f\", \"--odf\", type=\"string\", action=\"store\", dest=\"odfOutputFilename\",\r\n help=\"output to this file in ODF format\", default=\"\")\r\n parser.add_option(\"-p\", \"--portlist\", type=\"string\", action=\"store\", dest=\"portList\",\r\n help=\"specify specific ports to show\")\r\n parser.add_option(\"-r\", \"--riskfactors\", type=\"string\", action=\"store\", dest=\"riskFactorsList\", default=\"critical,high,moderate,medium,low,none\",\r\n help=\"specify list of allowable risk factors (default is any of critical,high,moderate,medium,low,none\")\r\n parser.add_option(\"-t\", \"--hostlist\", type=\"string\", action=\"store\", dest=\"hostList\",\r\n help=\"specify specific hosts to show\")\r\n parser.add_option(\"-s\", \"--severities\", type=\"string\", action=\"store\", dest=\"severityList\", default=\"critical_hole,hole,warn,note,info,openport\",\r\n help=\"specify specific list of severity codes to show (default is any of critical_hole,hole,warn,note,info,openport\")\r\n parser.add_option(\"-q\", \"--query\", type=\"string\", action=\"store\", dest=\"contentQuery\",\r\n help=\"show all results whose synopses match this regular expression\")\r\n parser.add_option(\"-i\", \"--idlist\", type=\"string\", action=\"store\", dest=\"pluginIDList\",\r\n help=\"display only results that match these Nessus plugin IDs\")\r\n parser.add_option(\"-c\", \"--csv\", type=\"string\", action=\"store\", dest=\"csvOutputFilename\", default=\"\",\r\n help='output CSV-friendly text delimitted by default or overriden delimiter to a given filename (use \"0\" for standard output)')\r\n# parser.add_option(\"-c\", \"--csv\", action=\"store_true\", dest=\"CSV\", default=False,\r\n# help=\"output CSV-friendly text delimitted by |++| (overridable with the 'd' option)\")\r\n parser.add_option(\"-d\", \"--delimiter\", 
type=\"string\", action=\"store\", dest=\"delimiter\", default=\"|++|\", \r\n help=\"override CSV delimiter default of |++|\")\r\n\r\n (options, args) = parser.parse_args() \r\n \r\n if options.orderType: options.orderType = options.orderType.lower()\r\n\r\n return (options,args)", "def main():\n if len(sys.argv) < 2:\n print_usage()\n args = sys.argv[1:]\n\n #Declare and initialize the variables controlled by switch\n check = False\n view = False\n debug = False\n kills = []\n\n #Eat any switches from the front\n while args and args[0].startswith('-'):\n arg = args.pop(0).lower()\n print(\"eating \" + arg)\n mko = re.search(r\"-k=([1-9]+)$\", arg)\n if mko is not None:\n kills.append(int(mko.groups()[0]))\n elif arg == '-c':\n check = True\n elif arg == '-v':\n view = True\n elif arg == '-d':\n debug = True\n elif arg == '--help':\n print_usage()\n else:\n die(\"ERROR: Switch '{}' not recognized\".format(arg))\n\n # Do we have enough parameters left?\n if len(args) not in range(4, 6):\n print(args)\n die(\"ERROR: Wrong number of parameters supplied\")\n dest = os.path.join(SCAN_PATH, args[4]) if len(args) == 5 else None\n\n scan_core.perform_scan(dest, args[0], args[1], args[2], args[3],\n view=view, check=check, kills=kills, debug=debug)", "def get_filters():\n\n city = prompts.city_prompt.launch()\n\n _filter = prompts.filter_prompt.launch()\n\n if _filter == \"Month\":\n month = prompts.month_prompt.launch()\n day = \"All\"\n\n elif _filter == \"Day\":\n day = prompts.day_prompt.launch()\n month = \"All\"\n\n elif _filter == \"Both\":\n month = prompts.month_prompt.launch()\n day = prompts.day_prompt.launch()\n\n else:\n month, day = \"All\", \"All\"\n\n print(\"-\" * 40)\n return city, month, day", "def main():\n licensify(_parse_args())", "def InitializeFiltersFromArgs(args):\n test_filters = []\n if args.isolated_script_test_filters:\n args.test_filters = [\n isolated_script_test_filter.replace('::', ':')\n for isolated_script_test_filter in args.isolated_script_test_filters\n ]\n if args.test_filters:\n for filt in args.test_filters:\n test_filters.append(\n _CMDLINE_NAME_SEGMENT_RE.sub('', filt.replace('#', '.')))\n\n if not args.test_filter_files:\n return test_filters\n\n # At this point it's potentially several files, in a list and ; separated\n for test_filter_files in args.test_filter_files:\n # At this point it's potentially several files, ; separated\n for test_filter_file in test_filter_files.split(';'):\n # At this point it's individual files\n with open(test_filter_file, 'r') as f:\n positive_patterns, negative_patterns = ParseFilterFile(f)\n filter_string = AppendPatternsToFilter('', positive_patterns,\n negative_patterns)\n test_filters.append(filter_string)\n\n return test_filters", "def processArgs(self, argv):\n parser = OptionParser(usage=usage)\n parser.add_option(\"-a\", \"--show_ADT\", action=\"store_true\", dest=\"show_ADT\",\n default=self.show_ADT, help=\"Display ADT value if set\")\n parser.add_option(\"-f\", \"--show_file\", action=\"store_true\", dest=\"show_file\",\n default=self.show_file, help=\"Display matching filename if set\")\n parser.add_option(\"-t\", \"--show_time\", action=\"store_true\", dest=\"show_time\",\n default=self.show_time, help=\"Display message time\")\n parser.add_option(\"-v\", \"--show_visitID\", action=\"store_true\", dest=\"show_visitID\",\n default=self.show_visitID, help=\"Display visit ID\")\n parser.add_option(\"-p\", \"--show_pc\",\n action=\"store_true\",\n dest=\"show_pc\",\n default=self.show_pc,\n help=\"Display 
patient class\")\n\n (options, pargs) = parser.parse_args()\n if len(pargs) < 3:\n parser.error(\"incorrect number of arguments\")\n\n self.show_ADT = parser.values.show_ADT\n self.show_file = parser.values.show_file\n self.show_time = parser.values.show_time\n self.show_visitID = parser.values.show_visitID\n self.show_pc = parser.values.show_pc\n \n self.segments_of_interest = pargs.pop(0)\n if len(self.segments_of_interest) != 3:\n parser.error(\"segment '%s' looks incorrect, expected something like 'PV1'\"\n % self.segments_of_interest)\n\n try:\n nums = pargs.pop(0).split(\",\")\n for num in nums:\n if 'MSH' == self.segments_of_interest:\n num = int(num) - 1\n self.sequences.append(int(num))\n except:\n parser.error(\"sequence must be an integer, separate multiple w/ comma and no spaces\")\n\n for patternOrFile in pargs:\n for file in glob.glob(patternOrFile):\n if not os.path.isfile(file):\n parser.error(\"can't open input file %s\" % file)\n self.filelist.append(file)\n \n # Require at least one file\n if not len(self.filelist):\n parser.error(\"at least one input file is required\")", "def run(self, *args, **kwargs):\n\n def filters_show():\n \"\"\"\n Show the filters used\n \"\"\"\n log = slog()\n log.title_set('Filters applied')\n if self.args['table3D']: log.render3D()\n log('Input directory: %s\\n' % self.str_inputDir)\n log('Output directory: %s\\n' % self.str_outputDir)\n for filter in ['file', 'dir']:\n log('%sFilter: ' % filter)\n sl_ffilter = ['%s %s' % (x, self.args['%sFilterLogic' % filter]) \\\n for x in self.args['%sFilter' % filter].split(',')]\n str_ffilter = ' '.join(sl_ffilter)\n sl_ffilter = str_ffilter.split()\n str_ffilter = ' '.join(sl_ffilter[:-1])\n log('%s\\n ' % str_ffilter)\n return log\n\n def stats_process():\n \"\"\"\n Call the dir/files stats processing\n \"\"\"\n nonlocal d_stats, b_status\n log = slog()\n d_stats = self.stats_compute()\n if self.toConsole() or self.args['duf'] or self.args['du']:\n self.dp.qprint(d_stats['report'], level = self.debugLevel)\n slog_filter = filters_show()\n log.title_set('Size statistics')\n if self.args['table3D']: log.render3D()\n log('Total size (raw): %d\\n' % d_stats['totalSize'] )\n log('Total size (friendly): {:,}\\n'.format(d_stats['totalSize']) )\n log('Total size (human): %s\\n' % d_stats['totalSize_human'] )\n log('Total files: %s\\n' % d_stats['files'] )\n log('Total dirs: %s\\n' % d_stats['dirs'] )\n log('Total runtime: %5.3f s' % other.toc() )\n b_status = b_status and d_stats['status']\n return {\n 'status': b_status,\n 'filterLog': slog_filter,\n 'bodyLog': log\n }\n\n def tree_resolveRoot():\n \"\"\"\n Set the 'rootDir' for the tree structure. This is either a\n '.' 
indicating a relative tree, or the inputDir\n \"\"\"\n nonlocal str_rootDir\n if self.b_relativeDir:\n os.chdir(self.str_inputDir)\n str_rootDir = '.'\n else:\n str_rootDir = self.str_inputDir\n return str_rootDir\n\n def timer_startIfNeeded():\n \"\"\"\n Determine if the timer should start\n \"\"\"\n nonlocal b_timerStart\n for k, v in kwargs.items():\n if k == 'timerStart': b_timerStart = bool(v)\n if b_timerStart:\n other.tic()\n\n def postProcess_check() -> dict:\n \"\"\"\n Once a tree has been constructed, run some\n in-line post processing filtering and other\n operations as desired.\n \"\"\"\n nonlocal d_test, b_status, d_filter, d_stats\n\n if len(self.args['fileFilter']) or len(self.args['dirFilter']):\n d_filter = self.filterFileHitList()\n b_status = d_filter['status']\n if self.b_test:\n d_test = self.test_run(*args, **kwargs)\n b_status = b_status and d_test['status']\n if self.b_stats or self.b_statsReverse or \\\n self.b_jsonStats or self.args['du'] or self.args['duf']:\n d_stats = stats_process()\n b_status = b_status and d_stats['status']\n self.verbosityLevel = 1\n if self.toConsole():\n if not self.args['du'] and not self.args['duf']:\n print(d_stats['filterLog'].border_draw())\n print(d_stats['bodyLog'].border_draw())\n elif self.args['du'] or self.args['duf']:\n print(d_stats['bodyLog'])\n else:\n d_stats['filterLog'] = d_stats['filterLog'].json_dump()\n d_stats['bodyLog'] = d_stats['bodyLog'].json_dump()\n\n return {\n 'status': b_status,\n 'filter': d_filter,\n 'test': d_test,\n 'stats': d_stats\n }\n\n b_status = True\n d_probe = {}\n d_tree = {}\n d_stats = {}\n d_post = {}\n str_error = ''\n b_timerStart = False\n d_test = {}\n d_env = {}\n d_filter = {}\n str_rootDir = ''\n\n timer_startIfNeeded()\n b_status, str_error = self.unpack(self.env_check(), 'status', 'error')\n\n if b_status:\n str_origDir = os.getcwd()\n d_tree = self.tree_construct(\n d_probe = self.tree_probe(root = tree_resolveRoot()),\n constructCallback = self.dirsize_get\n )\n b_status = d_tree['status']\n d_post = postProcess_check()\n if self.b_jsonStats:\n print(json.dumps(d_post['stats'], indent = 4, sort_keys = True))\n\n if self.b_relativeDir:\n os.chdir(str_origDir)\n\n d_ret = {\n 'status': b_status,\n 'd_tree': d_tree,\n 'd_stats': d_stats,\n 'd_test': d_test,\n 'd_post': d_post,\n 'str_error': str_error,\n 'runTime': other.toc()\n }\n\n if self.b_json:\n print(json.dumps(d_ret, indent = 4, sort_keys = True))\n\n return d_ret", "def main():\n analyze_perturbations()", "def parse(self, args):\n pass", "def run_app():\n global interface\n global filter_results\n\n description = 'Simple Wifi scanner for 2.4 GHz range'\n epilog = 'The author of this code take no responsibility for your use or misuse'\n parser = argparse.ArgumentParser(prog='ScanWifi.py', description=description, epilog=epilog)\n parser.add_argument(\"interface\", help='Your interface in monitor mode')\n parser.add_argument('-c', '--channel', help='Channel number for 2.4 GHz range (min 1/max 14)', default=1, type=int)\n parser.add_argument('--all', help='Scan on all channels for 2.4 GHz range', default=False, action='store_true')\n parser.add_argument('--filter', help='Filter results only for STA (Probe Req)', default=False, action='store_true')\n args = parser.parse_args()\n\n if len(args.interface) < 1:\n print('You did not provide any interface?')\n exit(1)\n else:\n interface = args.interface\n\n if not args.all and (args.channel < 1 or args.channel > 14):\n print('You will scan on channel {}?'.format(args.channel))\n 
exit(1)\n\n if not args.all and args.channel in range(1, 14):\n set_specific_channel(args.channel)\n\n if args.all:\n channel_changer = Thread(target=change_channel)\n channel_changer.daemon = True\n channel_changer.start()\n\n if args.filter:\n print(\"-\" * 85)\n sniff(prn=evaluate_sniffing_packet_sta, iface=interface)\n else:\n print(\"-\" * 85)\n print(\"{:<24} {:<35} {:<5} {:<7} {}\".format(\"BSSID\", \"SSID\", \"dbm\", \"CH\", \"ENC\"))\n print(\"-\" * 85)\n sniff(prn=evaluate_sniffing_packet_ap, iface=interface)", "def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")", "def main(argv):\n parsed = parse_args(argv)\n instream = sys.stdin\n name = parsed.name\n if parsed.input_file != \"-\":\n instream = open(parsed.input_file, 'r')\n name = parsed.input_file.split('.')[1]\n print pfm_as_meme_str(parse_scer_pfm(instream, handle_passed=True), name)", "def preprocess_main():", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def test_filter_regex(re_arg, re_src, re_dest):\n args = parser.parse_args(['-re', *re_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(re_src, filters, args.extension, args.raw)\n assert dest == re_dest", "def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)", "def main():\n\n (options, args) = parse_options(sys.argv)\n\n iterator = GFFParser.GFFAddingIterator() \n examiner = GFFParser.GFFExaminer()\n\n exon_map = dict()\n\n id_dict = examiner.available_limits(options.anno)['gff_id']\n intron_lists = dict()\n\n ### collect all available sources from gff-file\n source_dict = examiner.available_limits(options.anno)['gff_source_type']\n taken_sources = set()\n #types = ['gene', 'mRNA', 'exon', 'CDS']\n types = ['exon']\n\n ### parse only for exons and let the GFFparser \n ### infer the respective parents (otherwise doubled entries occured)\n ### we sanitize the structure later on anyways\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### try different type, if sources are empty \n if len(taken_sources) == 0:\n types = ['CDS']\n for key in [source[0] for source in source_dict.keys() if source[1] in types]:\n taken_sources.add(key)\n\n ### print taken_sources\n if len(taken_sources) == 0:\n print >> sys.stderr, 'No suitable sources found!'\n sys.exit(-1)\n\n ### only show available sources - if neccessary\n if options.show_sources:\n print 'Parsed file %s\\n' % options.anno\n print 'Following sources are available:\\n'\n for source in taken_sources:\n print source \n print '\\nUse option -s to specify a comma-separated list of sources (-s source1,source2,source3), otherwise all sources are taken'\n sys.exit(0)\n\n if options.sources != '':\n user_sources = set(options.sources.split(','))\n taken_sources = taken_sources.intersection(user_sources)\n if len(taken_sources) == 0:\n print >> sys.stderr, 'The specified sources do not match any of the available sources - Please use option -S to get a list of available sources'\n sys.exit(-1)\n\n if options.verbose:\n print \"take sources %s\" % str(list(taken_sources))\n\n ### build up gff-parsing filter\n gff_sources = []\n for source in taken_sources:\n gff_sources.extend(zip([source] * len(types), types))\n\n ### parse gff-file\n for idx in id_dict.keys():\n print 'parsing chromosome %s' % idx\n if len(gff_sources) > 0:\n trans_dict = 
iterator.get_all_features(options.anno, {'gff_source_type':gff_sources, 'gff_id':idx})\n else:\n trans_dict = iterator.get_all_features(options.anno, {'gff_id':idx})\n ### since we parse only one chromosome, this loop is evaluated only once\n for chrm in trans_dict.keys():\n ### verify/sanitize the created dictionairy\n fix_structure(trans_dict[chrm])\n intron_lists[chrm] = dict()\n for gene in trans_dict[chrm].features:\n for trans in gene.sub_features:\n if trans.type == 'exon':\n print \"WARNING: Exon on transcript level:\"\n print trans\n print 'will continue\\n'\n continue\n elif len(trans.sub_features) > 1: ### at least two exons for one intron ...\n strand = trans.sub_features[0].strand\n contig_list = [(trans.sub_features[i].location.nofuzzy_start, trans.sub_features[i].location.nofuzzy_end) for i in range(len(trans.sub_features))]\n contig_list.sort(lambda u, v:u[0]-v[0])\n for exon in range(len(contig_list) - 1):\n ### update intron lists\n if contig_list[exon][1] - contig_list[exon + 1][0] == 0:\n continue\n try:\n assert(contig_list[exon][1] < contig_list[exon + 1][0])\n except AssertionError:\n print >> sys.stderr, 'exon_1 %i, exon_2 %i' % (contig_list[exon][1], contig_list[exon + 1][0]) \n print >> sys.stderr, contig_list[exon]\n print >> sys.stderr, contig_list[exon+1]\n print >> sys.stderr, exon\n sys.exit(-1)\n ### for now strand information is only dummy\n intron_lists[chrm][(0, contig_list[exon][1], contig_list[exon + 1][0])] = strand\n \n ### update exon map\n for exon in range(len(contig_list)):\n if not exon_map.has_key(chrm):\n exon_map[chrm] = dict()\n\n if not exon_map[chrm].has_key(trans.id):\n exon_map[chrm][trans.id] = dict()\n ### we assume, that an exon cannot occurr twice in the same transcript!\n ### the value in the dict is a binary encoding, if the left/right end is intronic 10 = 2 means, 5' end is intronic\n if len(contig_list) == 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 0 ### 00 -> should never occurr\n elif exon == 0:\n exon_map[chrm][trans.id][contig_list[exon]] = 2 ### 10\n elif exon == len(contig_list) - 1:\n exon_map[chrm][trans.id][contig_list[exon]] = 1 ### 01\n else:\n exon_map[chrm][trans.id][contig_list[exon]] = 3 ### 11 \n\n outfile = open(options.outfile, 'w')\n cPickle.dump(intron_lists, outfile)\n outfile.close()\n \n outfile = open(options.outfile + '.' + 'cov', 'w')\n cPickle.dump(exon_map, outfile)\n outfile.close()", "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data!\\n')\r\n\r\n # ref https://stackabuse.com/getting-user-input-in-python/\r\n\r\n # Get user input for city (chicago, new york city, washington).\r\n cities = ['Chicago', 'New York city', 'Washington']\r\n city = get_user_input(cities,\"city\")\r\n\r\n # Get user input for month (all, january, february, ... , june)\r\n months = ['All', 'Jan', 'Feb', 'Mar', 'Apr', 'Jun']\r\n month = get_user_input(months,\"month\")\r\n\r\n # Get user input for day of week (all, monday, tuesday, ... sunday)\r\n days = ['All', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n day = get_user_input(days,\"day\")\r\n\r\n print('-'*40)\r\n return city, month, day", "def process_arguments():\n # Create ArgumentParser object. 
Description message will be displayed as part of help message if script is run with -h flag\n parser = argparse.ArgumentParser(description='Prints tier 1 and 2 variant details to stdout for a given 100k case')\n # Define the arguments that will be taken.\n parser.add_argument('-i', '--ir_id', required=True, help='GeL Interpretation Request ID in format 12345-1')\n parser.add_argument('-p', '--proband_id', required=True, help='GeL participant ID for proband')\n # Return the arguments\n return parser.parse_args()", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main(args):\n\n tools = []\n for tool in args.tools: # Parse tools, their subs and args\n logging.info(\"Parsing tool: %r\", tool)\n\n tsig = parse_tool_sig(tool)\n if not tsig or not tsig.get(\"snames\", None):\n logging.error(\"failed parsing snames from tool: '%s'\", tool)\n continue\n\n tools.append(tsig)\n\n return args.gen(args, tools)", "def _filter(*args, **kwargs):\n if kwargs.pop('full_output', True):\n return filter(*args, full_output=True, **kwargs)\n return IteratorContextManager(*args, parser_func=filter, **kwargs)", "def ls(filter=None):" ]
[ "0.6211614", "0.6186331", "0.6099759", "0.60936326", "0.60703206", "0.5910186", "0.5906957", "0.5856465", "0.5850204", "0.58114266", "0.577527", "0.5722053", "0.57207435", "0.56683946", "0.56594044", "0.5633427", "0.55837566", "0.5580852", "0.55734587", "0.5561661", "0.5559251", "0.55260324", "0.5514741", "0.55137146", "0.55015826", "0.54741895", "0.5468557", "0.5452889", "0.54381937", "0.5434017", "0.54307747", "0.5420126", "0.5419542", "0.5417271", "0.5389711", "0.5388656", "0.5361672", "0.5346575", "0.5341392", "0.5340365", "0.533319", "0.53297305", "0.53297305", "0.53282756", "0.53180635", "0.53164583", "0.5297567", "0.5289482", "0.5283966", "0.52802604", "0.5278876", "0.52581865", "0.5244084", "0.52380437", "0.52347225", "0.5230069", "0.52274835", "0.52246463", "0.52219576", "0.52168435", "0.52095586", "0.51996833", "0.51954806", "0.5191294", "0.5191294", "0.51888746", "0.5178221", "0.51664966", "0.51656693", "0.51645625", "0.5163621", "0.51634854", "0.51602966", "0.515903", "0.5151599", "0.5146925", "0.5141434", "0.5141158", "0.51391435", "0.5123934", "0.51110184", "0.5110646", "0.5110404", "0.5100879", "0.50911003", "0.50885016", "0.5075604", "0.50717986", "0.5070808", "0.5069421", "0.50693524", "0.5069152", "0.5064336", "0.50558126", "0.5055651", "0.5048993", "0.5043289", "0.5043289", "0.5035224", "0.50321215", "0.50302786" ]
0.0
-1
Converts the CSV file into the required data format for time series prediction
def preprocess(dataframe_csvpath, cols_x, cols_y, window_in, window_out, data_div_frac, popu_size):
    #Loading .CSV file and creating dataframe
    df = pd.read_csv(dataframe_csvpath)
    len_ser = len(df[df['Series_No'] == 1])

    #randomly shuffle different series
    permute = np.random.permutation(range(1, len(set(df['Series_No']))))
    train_series_seq = permute[: int(len(set(df['Series_No'])) * data_div_frac)]
    test_series_seq = permute[int(len(set(df['Series_No'])) * data_div_frac):]

    #taking relevant columns from dataframe
    df_x = df[cols_x]
    df_y = df[cols_y]

    #Initialize empty lists which are later to be appended
    train_seq, test_seq = [], []
    x_test = []
    y_true = []

    #Creating time series data
    for series_no in train_series_seq:
        #new dataframe variable assignment for particular series from df_x, df_y
        series_df_x = df_x[df_x['Series_No'] == series_no]
        series_df_y = df_x[df_y['Series_No'] == series_no]
        #converting into numpy arrays
        array_x = np.array(series_df_x)
        array_y = np.array(series_df_y)
        #for loop to append to x_train y_train arrays according to window_in, window_out
        for idx in range(len(series_df_x) - window_in - window_out + 1):
            #'len(series_df_x) - window_in - window_out + 1' needs to be checked
            arrayx = array_x.copy()
            x = arrayx[idx:idx + window_in, : len(cols_x) - 1]
            #print(x)
            x[:, 0:3] = x[:, 0:3] / popu_size
            #print(x)
            arrayy = array_y.copy()
            y = arrayy[idx + window_in:idx + window_in + window_out, : len(cols_y) - 1]
            y = y / popu_size
            train_seq.append((x, y)) #our col_x and col_y have 'Series number' as the last item, so to remove that use [, : len(cols_x)]
            #y_train.append(array_y[idx + window_in:idx + window_in + window_out, : len(cols_y) - 1])
    #print(train_seq)

    #repeat for test sequence
    for series_no in test_series_seq:
        #new dataframe variable assignment for particular series from df_x, df_y
        series_df_x = df_x[df_x['Series_No'] == series_no]
        series_df_y = df_x[df_y['Series_No'] == series_no]
        #converting into numpy arrays
        array_x = np.array(series_df_x)
        array_y = np.array(series_df_y)
        #for loop to append to x_train y_train arrays according to window_in, window_out
        for idx in range(len(series_df_x) - window_in - window_out + 1):
            #'len(series_df_x) - window_in - window_out + 1' needs to be checked
            arrayx = array_x.copy()
            x = arrayx[idx:idx + window_in, : len(cols_x) - 1]
            x[:, 0:3] = x[:, 0:3] / popu_size
            x_test.append(x)
            arrayy = array_y.copy()
            y = arrayy[idx + window_in:idx + window_in + window_out, : len(cols_y) - 1]
            y = y / popu_size
            y_true.append(y)
            test_seq.append((x, y))
            #test_seq.append((array_x[idx:idx + window_in, : len(cols_x) - 1], array_y[idx + window_in:idx + window_in + window_out, : len(cols_y) - 1]))
            #our col_x and col_y have 'Series number' as the last item, so to remove that use [, : len(cols_x)]
            #y_test.append(array_y[idx + window_in:idx + window_in + window_out, : len(cols_y) - 1])

    win_len_per_ser = len_ser - window_in - window_out + 1
    return np.array(train_seq), np.array(test_seq), len_ser, win_len_per_ser, np.array(x_test), np.array(y_true)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_training_data(self, csv_path):\n data = pd.read_csv(csv_path)\n data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']] = data[['Hashrate', 'Addresses', 'Supply', 'Trx_Fee', 'Daily_Trx']].apply(pd.to_numeric)\n data[['Timestamp']] = data[['Timestamp']].apply(pd.to_datetime)\n data = data[data['Timestamp'] < self.end_time]\n data = data[data['Timestamp'] > self.start_time]\n\n return data", "def loadCSV(input_file):", "def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset", "def preprocessing(name_file):\n\n db_data = pd.read_csv(name_file).dropna()\n db_data['Timestamp'] = pd.to_datetime(db_data['Timestamp'], unit='s')\n db_data = db_data[db_data['Timestamp'].dt.year >= 2017]\n db_data.reset_index(inplace=True, drop=True)\n db_data = db_data.drop(['Timestamp'], axis=1)\n db_data = db_data[0::60]\n\n n = len(db_data)\n\n # Split data\n train = db_data[0:int(n * 0.7)]\n validation = db_data[int(n * 0.7):int(n * 0.9)]\n test = db_data[int(n * 0.9):]\n\n # Normalize data\n train_mean = train.mean()\n train_std = train.std()\n train = (train - train_mean) / train_std\n validation = (validation - train_mean) / train_std\n test = (test - train_mean) / train_std\n\n return train, validation, test", "def csv_loader(csv_file):\n df = pd.read_csv(csv_file, sep=';', parse_dates=['Data_Alteraçao'])\n pd.set_option('display.float_format', '{:.0f}'.format)\n\n df = df.fillna(0)\n df = df.drop(columns=['Cod. Pareamento', 'Cod. UF', 'Sigla UF', 'Cod. Subarea',\n 'Nome Subarea', 'Cod. Municipio', 'Nome Municipio', 'Codigo Agencia',\n 'Nome Agencia', 'Cod. Setor', 'Cod. Logradouro CNEFE',\n 'Tipo Logradouro CNEFE', 'Titulo Logradouro CNEFE',\n 'Nome Logradouro CNEFE', 'Nome Tratado CNEFE', 'Tipo Logradouro DNE',\n 'Titulo Logradouro DNE', 'Nome Logradouro DNE', 'Nome Tratado DNE',\n 'Logradouro Completo DNE', 'Distancia', 'Cod. 
Match', 'Motivo Match',\n 'CEPs Face', 'Localidade Face',\n 'Alterar Logradouro para DNE?', 'Observaçao', 'SIAPE Alteração',\n 'Nome Alteraçao', 'Data_Alteraçao', 'Status', 'Unnamed: 33'])\n\n # df.astype({'CEP Logradouro CNEFE': 'int32'}).dtypes\n\n df['CEP'] = df['CEP'].str.replace(' ', '', regex=False)\n\n ceps_dne = []\n for index, row in df.iterrows():\n if type(row.CEP) == str:\n for cep in row.CEP.split(','):\n # print(index, cep)\n ceps_dne.append(int(cep))\n\n ceps_cnefe = df['CEP Logradouro CNEFE'].astype(int).tolist()\n ceps = ceps_dne + ceps_cnefe\n ceps = list(set(ceps))\n return pd.Series(ceps)", "def load_data(filename):\n evidence = []\n labels = []\n with open(filename) as csvfile:\n file_rows = csv.reader(csvfile)\n next(file_rows)\n for row in file_rows:\n values = []\n\n # - Administrative, an integer\n values.append(int(row.pop(0)))\n # - Administrative_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - Informational, an integer\n values.append(int(row.pop(0)))\n # - Informational_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - ProductRelated, an integer\n values.append(int(row.pop(0)))\n # - ProductRelated_Duration, a floating point number\n values.append(float(row.pop(0)))\n # - BounceRates, a floating point number\n values.append(float(row.pop(0)))\n # - ExitRates, a floating point number\n values.append(float(row.pop(0)))\n # - PageValues, a floating point number\n values.append(float(row.pop(0)))\n # - SpecialDay, a floating point number\n values.append(float(row.pop(0)))\n # - Month, an index from 0 (January) to 11 (December)\n values.append(month_to_index(row.pop(0)))\n # - OperatingSystems, an integer\n values.append(int(row.pop(0)))\n # - Browser, an integer\n values.append(int(row.pop(0)))\n # - Region, an integer\n values.append(int(row.pop(0)))\n # - TrafficType, an integer\n values.append(int(row.pop(0)))\n # - VisitorType, an integer 0 (not returning) or 1 (returning)\n visitor_type = row.pop(0)\n if visitor_type == \"Returning_Visitor\":\n values.append(1)\n else:\n values.append(0)\n # - Weekend, an integer 0 (if false) or 1 (if true)label = row.pop(0)\n weekend = row.pop(0)\n if weekend == \"TRUE\":\n values.append(1)\n else:\n values.append(0)\n\n evidence.append(values)\n\n label = row.pop(0)\n if label == \"TRUE\":\n labels.append(1)\n else:\n labels.append(0)\n\n return evidence, labels", "def get_data(file_name):\n csv_file = open(file_name, 'rb')\n train_content = csv.reader(csv_file)\n\n # ignore header\n train_content.next()\n\n # preprocessing functions for each column index\n # Several preprocessing can be defined for each column.\n # A new variable is associated to EACH preprocessing function\n preproc_funcs = {0: ['get_hour']}\n\n # Read data from file, store it as an integer\n data = []\n for row in train_content:\n data_row = []\n for n, col in enumerate(row):\n # if the current column requires preprocessing functions, apply them\n if preproc_funcs.has_key(n):\n # Each preprocessing give a new column\n for preproc_func in preproc_funcs[n]:\n func = globals().get(preproc_func)\n data_row.append(int(float(func(col))))\n # If no preprocessing, do nothing\n else:\n data_row.append(int(float(col)))\n\n data.append(data_row)\n\n csv_file.close()\n\n return data", "def csv_Predictions_to_MistkDataRecord(csv_file, set_id):\n _logger.debug('Converting csv file ' + str(csv_file) + ' to DataRecords')\n recordList = [] \n with open(csv_file) as fp:\n # Check if the file has a header line, skip if 
necessary\n has_header = csv.Sniffer().has_header(fp.read(2048)) # Size of buffer for header\n fp.seek(0) # Rewind.\n reader = csv.reader(fp)\n header = None\n # header\n if has_header:\n header = next(reader)\n indices = {}\n id_index = None\n label_index = None\n bounds_index = None\n # check for defined headers\n for col in header:\n if col == 'id':\n id_index = header.index('id')\n elif col == 'recordId':\n id_index = header.index('recordId')\n elif col == 'label':\n label_index = header.index('label')\n elif col == 'labels':\n label_index = header.index('labels')\n elif col == 'bounds':\n bounds_index = header.index('bounds')\n else:\n indices[col] = [i for i, x in enumerate(header) if x == col] \n \n # check indices exist\n if id_index is None:\n err = \"CSV header does not contain 'id' or 'recordId' column for label id in file: \" + str(csv_file)\n _logger.debug(err)\n raise Exception(err)\n if label_index is None:\n err = \"CSV header does not contain 'label' or 'labels' column for labels in file: \" + str(csv)\n _logger.debug(err)\n raise Exception(err)\n \n # pull data\n for data in reader:\n if not len(data) > 0:\n continue\n recordId = data[id_index].strip()\n record_data = []\n labels = data[label_index].strip()\n i = 0 \n # labeled data may have more than one label per recordId\n for label in labels.split(' '):\n label_dict={}\n label_dict['label'] = label\n # append data related to each label\n for column, inds in indices.items(): \n val = data[inds[0]].strip()\n label_dict[column] = val.split(' ')[i] \n # bounds are special due to 4 values per bounding box\n if bounds_index:\n bounds = _split_bounds(data[bounds_index].strip())\n label_dict['bounds'] = bounds[i] \n record_data.append(label_dict) \n i += 1\n record = MistkDataRecord(record_id=recordId, referenced_set_id=set_id, values=record_data)\n recordList.append(record) \n # no header\n else:\n for data in reader:\n recordId = data[0].strip()\n record_data = []\n labels = data[1].strip()\n confs = None\n bounds = None\n if len(data) > 2:\n confs = data[2].strip()\n if len(data) > 3:\n bounds = data[3].strip()\n i = 0\n for label in labels.split(' '):\n label_dict={}\n label_dict['label'] = label\n if confs:\n label_dict['confidence'] = confs.split(' ')[i] \n if bounds:\n label_dict['bounds'] = _split_bounds(bounds)[i]\n record_data.append(label_dict) \n i += 1 \n record = MistkDataRecord(record_id=recordId, referenced_set_id=set_id, values=record_data)\n recordList.append(record)\n return recordList", "def from_csv(self,path):\n self.csv_path = path\n\n try:\n fh = open(self.csv_path, \"r\")\n except IOError:\n print(\"Error: no such file or directory\") \n\n self.csv_dataframe = pd.DataFrame(pd.read_csv(self.csv_path, header=0, keep_default_na=False)).dropna(axis=0, how='any')\n test = pd.DataFrame(pd.read_csv(self.csv_path)).dropna(axis=0, how='any')\n types = [0 for i in range(len(test.dtypes))]\n a = fh.readline()\n a = a[:-1] # remove '\\n'\n x = a.split(',') # x stores the name of each column\n fh.close()\n\n #type transformation\n for i in range(len(test.dtypes)):\n if test.dtypes[i].name[0:3] == 'int' or test.dtypes[i].name[0:5] == 'float':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n for j in test[x[i]]:\n if not (j == 0 or (j > 1000 and j < 2100)):\n types[i] = test.dtypes[i].name[0:5]\n break\n else:\n types[i] = 'year'\n elif test.dtypes[i].name[0:6] == 'object':\n if (x[i][0] == \"'\" or x[i][0] == '\"'):\n x[i] = x[i].replace('\\'', '').replace('\"', '')\n 
for j in test[x[i]]:\n if j != 0 and not(re.search(r'\\d+[/-]\\d+[/-]\\d+', j)):\n types[i] = 'varchar'\n break\n else:\n types[i] = 'date'\n \n name = path.rsplit('/', 1)[-1][:-4]\n self.table_info(name, x, types)\n self.import_method = methods_of_import[2] # = 'csv'\n\n self.show_csv_info()", "def data_preprocessing(dataset):\r\n df = pd.read_csv(dataset)\r\n df.head()\r\n df.describe()\r\n df.isnull().sum()\r\n df= df.drop(['instant'], axis=1)\r\n df['dteday'] = pd.to_datetime(df['dteday'].apply(str) + ' ' + df['hr'].apply(str) + ':00:00')\r\n return df", "def load_data_to_predict(self, filepath, sep=\",\"):\n if filepath.split('.')[-1] == 'csv':\n self.data_to_predict = pd.read_csv(filepath, sep=sep)\n elif filepath.split('.')[-1] == 'json':\n self.data_to_predict = pd.read_json(filepath)\n else:\n print 'Please select a csv or json file'", "def format_data(file):\r\n \r\n \r\n data = pd.read_csv(file)\r\n data.index = list(data.iloc[:,0])\r\n data = data.iloc[:,1:]\r\n \r\n return data", "def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)", "def process_fx_file(path):\n data = pd.read_csv(path, delimiter=';', decimal=',', parse_dates=[\"Date\"], date_parser=fx_dateparse)\n data.to_csv(path.split(\".\")[0] + \"_pr.\" + path.split(\".\")[1], float_format='%.6f', index=False)\n \n return data", "def __load(self, file_path):\n self.series = read_csv(file_path, header=0)\n self.series = self.__extract_series_per_country(self.countries).transpose().iloc[1:]\n self.series.index.names = ['Date']\n self.series.index = pd.to_datetime(self.series.index)\n self.series.columns = self.countries", "def load_data(train_file, test_file):\n\n data = np.asarray(pd.read_csv(train_file, header=0))\n data_ts = np.asarray(pd.read_csv(test_file, header=0))\n\n x_tra = data[:, :-1]\n y_tra = data[:, -1]\n\n return x_tra, y_tra, data_ts", "def _load_data(filename):\n\n def str2date(s):\n \"\"\"Converts a string to a datetime\"\"\"\n return datetime.strptime(s.decode(), \"%Y-%m-%d %H:%M:%S\")\n\n # Load the data\n return np.recfromcsv(filename, converters={0: str2date}, comments=\"#\")", "def load_data(input_dir, file_name, forecast_col='Close'):\n # read in csv\n df = pd.read_csv('{}/{}'.format(input_dir, file_name), parse_dates=['Date'], index_col=0)\n # select & add feature columns\n df.fillna(0, inplace=True)\n df = df[['Open', 'High', 'Low', 'Close']]\n df['HL_PCT'] = (df['High'] - df['Low']) / df['Close'] * 100.\n df['PCT_Change'] = (df['Close'] - df['Open']) / df['Open'] * 100.\n df = df.iloc[::-1]\n df.fillna(value=-9999, inplace=True)\n # set # of days to forecast out and shift column to be used as labels\n days_forecast = 15\n df['label'] = df[forecast_col].shift(-days_forecast)\n # set up feature & label matrices\n X = 
np.array(df.drop(['label'], 1))\n X = preprocessing.scale(X)\n x_recent = X[-days_forecast:]\n X = X[:-days_forecast]\n df.dropna(inplace=True)\n y = np.array(df['label'])\n # split data 80/20 for train & test respectively\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)\n return x_train, x_test, x_recent, y_train, y_test, df", "def read_input(path):\n data = pd.read_csv(path)\n for each_column in data.columns:\n if each_column != \"class\":\n mean = data[each_column].mean(); std = data[each_column].std()\n data[each_column] = (data[each_column]-mean)/std\n\n data['intercept'] = np.ones((len(data)))\n\n return data", "def parse_data(filename):\n x, y = [], []\n with open(filename) as f:\n reader = csv.reader(f)\n for row in reader:\n x.append(datetime.strptime(row[1], DATE_FORMAT))\n y.append(row[0])\n\n return x, y", "def import_csv_dataset():\n import_fields = pd.read_csv('redacted-2020-june-30-wprdc-.csv', header=None).to_numpy()[0, :]\n import_values = pd.read_csv('redacted-2020-june-30-wprdc-.csv').to_numpy()\n import_values = clean_values(import_values)\n return import_fields, import_values", "def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. 
\"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def readdata(filename):\n\tdt = np.dtype([('date','int'),('val','<f8')])\n\tdata = np.loadtxt(filename,dtype = dt,skiprows = 1)\n\treturn data", "def load_data(filename):\n\n evidance = list()\n labels = list()\n row = list()\n\n with open(filename) as file:\n for j, item in enumerate(file):\n if(j == 0):\n continue\n row = []\n for i, el in enumerate(item.split(',')):\n # print(el)\n if(i in {0,2,4,11,12,13,14}):\n row.append(int(el))\n elif(i in {1,3,5,6,7,8,9}):\n row.append(float(el))\n elif(i==10):\n if(el == \"Jan\"):\n row.append(int(0))\n elif(el == \"Feb\"): \n row.append(int(1))\n elif(el == \"Mar\"): \n row.append(int(2))\n elif(el == \"Apr\"): \n row.append(int(3))\n elif(el == \"May\"): \n row.append(int(4))\n elif(el == \"June\"): \n row.append(int(5))\n elif(el == \"Jul\"): \n row.append(int(6))\n elif(el == \"Aug\"): \n row.append(int(7))\n elif(el == \"Sep\"): \n row.append(int(8))\n elif(el == \"Oct\"): \n row.append(int(9))\n elif(el == \"Nov\"): \n row.append(int(10))\n elif(el == \"Dec\"): \n row.append(int(11))\n else:\n print(\"wrong month Exit\")\n quit()\n elif(i==15):\n if(el == 'Returning_Visitor'):\n row.append(int(1))\n else:\n row.append(int(0))\n elif(i==16):\n if(el == 'TRUE'):\n row.append(int(1))\n else:\n row.append(int(0)) \n elif(i==17):\n if(el == 'TRUE\\n' or el == 'TRUE'):\n row.append(int(1))\n else:\n row.append(int(0))\n else:\n print(\"Incorrect value\")\n\n evidance.append(row)\n # print(item.split(',')[-1])\n if(item.split(',')[-1] == 'FALSE\\n'):\n labels.append(0)\n else:\n labels.append(1)\n\n # print(labels)\n return (evidance, labels)", "def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df", "def read_csv():", "def convert(self):\n with open(self.path) as csvfile:\n reader = csv.reader(csvfile)\n d = []\n for row in reader:\n d+=[row]\n if self.n_class==2:\n dd = []\n for i in range(len(d)):\n if d[i][0] in ['EI','IE']:\n dd+=[d[i]]\n d = dd\n\n random.seed(0)\n random.shuffle(d)\n\n self.x = np.zeros((len(d),len(d[0][2].strip()),4))\n self.y = np.zeros((len(d),self.n_class))\n self.count = Counter([x[0] for x in d])\n for i in range(len(d)):\n self.x_raw += [d[i][2].strip()]\n tmp = [self.base[x] for x in d[i][2].strip()]\n for j in range(len(tmp)):\n if tmp[j]==4:\n # N: A or G or C or T\n self.x[i][j][0] = .25\n self.x[i][j][1] = .25\n self.x[i][j][2] = .25\n self.x[i][j][3] = .25\n elif tmp[j]==5:\n # D: A or G or T\n self.x[i][j][0] = .33\n self.x[i][j][1] = .33\n self.x[i][j][2] = .33\n elif tmp[j]==6:\n # R: A or G\n self.x[i][j][0] = .50\n self.x[i][j][2] = .50\n elif tmp[j]==7:\n # S: C or G\n self.x[i][j][2] = .50\n self.x[i][j][3] = .50\n else:\n self.x[i][j][tmp[j]] = 1\n\n #self.x[i][range(len(tmp)),tmp] = 1\n self.y[i][self.result[d[i][0]]] = 1", "def upload_csv_data(self, upload_file):\n db = DataBase(self.DATABASE_DATA)\n db.insert_data_from_file(\n 'triagedata.historicdata',\n ('clinic_id', 'severity', 'date_received', 'date_seen'),\n upload_file,\n ','\n )", "def convert_data(data_path='data/data.csv'):\n nba_stats = pd.read_csv(data_path)\n\n # get all rows from cols data and beyond\n nba_stats = 
nba_stats.loc[:, 'data':] \n\n # removing the final score and date. just want quarters\n nba_stats = nba_stats.drop(['date', 'final_score', 'opponent_points',\n 'diff_points'], axis=1)\n\n nba_stats = pd.get_dummies(nba_stats)\n nba_stats.loc[nba_stats['points'] < 20, 'points'] = 1\n nba_stats.loc[nba_stats['points'] >= 20, 'points'] = 0\n y_values = nba_stats['points']\n nba_stats = nba_stats.drop('points', axis=1)\n\n return nba_stats, y_values", "def load_data(filename):\n data = pd.read_csv(filename)\n\n labels = data['Revenue'].map(lambda x: 1 if x is True else 0).tolist()\n\n data['Month'] = data['Month'].map(lambda x: convert_month_to_number(x))\n data.drop(data[data['Month'] == -1].index)\n\n data['VisitorType'] = data['VisitorType'].map(lambda x: 1 if x == 'Returning_Visitor' else 0)\n data['Weekend'] = data['Weekend'].map(lambda x: 1 if x is True else 0)\n\n for i in data.keys():\n if i == 'Administrative' or i == 'Informational' or i == 'ProductRelated' or i == 'OperatingSystems' or \\\n i == 'Browser' or i == 'Region' or i == 'TrafficType' or i == 'VisitorType' or i == 'Weekend':\n if not pd.api.types.is_integer_dtype(data[i].dtype):\n data = data[i].apply(lambda x: not isinstance(x, int))\n if i == 'Administrative_Duration' or i == 'Informational_Duration' or i == 'ProductRelated_Duration' or \\\n i == 'BounceRates' or i == 'ExitRates' or i == 'PageValues' or i == 'SpecialDay':\n if not pd.api.types.is_float_dtype(data[i].dtype):\n data = data[data[i].apply(lambda x: isinstance(x, float))]\n data.drop(\"Revenue\", axis=1, inplace=True)\n evidence = data.values.tolist()\n return evidence, labels", "def import_dataset(fpath):\r\n data = read_csv(fpath)\r\n print(data.head())\r\n print(data.shape)\r\n return data", "def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = ['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values", "def load_data(path, train=True):\n COLUMNS = ['utterance_ID', 'dialog_act', 'utterance_t-3', \n 'utterance_t-2', 'utterance_t-1', 'utterance_t']\n\n if not train:\n COLUMNS.remove('dialog_act')\n \n df = (pd.read_csv(path, sep='\\t|;', engine='python', names=COLUMNS)\n .set_index('utterance_ID')\n .astype(str))\n df[COLUMNS[2:]] = df[COLUMNS[2:]].apply(preprocess)\n return df", "def dataset_from_csv(self, filename, time_column='point_in_time'):\n return pd.from_csv(filename, parse_dates=[time_column])", "def csv_parser(lines): \n\n data_points = []\n for line in lines:\n items = line.strip().split(\",\")\n try: #will fail on header line in file\n data_points.append(map(float, items[1:])) #first item is the label\n except ValueError: 
#must be the header\n continue\n return data_points", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype = str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0' \n y[y==label1]='1' \n y[y==label2]='2'\n y.astype(np.float) \n return X, y", "def create_data(path):\n\n with open(path, newline='') as f:\n reader = csv.reader(f)\n final_data = list(reader)\n final_data.pop(0)\n random.shuffle(final_data)\n\n for i in range(len(final_data)):\n for j in range(len(final_data[i])):\n final_data[i][j] = float(final_data[i][j])\n\n return final_data", "def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data", "def data_input(self):\n path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))\n if not os.path.isfile('{0}/{1}.csv'.format(path, self.data_file)):\n print 'Error: Dataset file is not exist.'\n exit()\n # Uplead Dataset.csv file.\n f = open('{0}/{1}.csv'.format(path, self.data_file), 'r')\n print 'Now uploading dataset File.....'\n f = list(f)\n # The Dataset contains heading, number of lines - heading\n self.number_of_VOCs = sum(1 for row in f)-1\n # Count number of columns, last column's value is empty, that is why -1.\n self.number_of_columns = len(f[0].split(',')) -1\n self.first_m_z = int(f[0].split(',')[3]) # find the first m/z value.\n self.last_m_z = int(f[0].split(',')[-2]) # find the last m/z value.\n print 'dataset includes ', self.number_of_VOCs, 'VOCs in all samples '\n print ('dataset includes ', self.number_of_columns, ' Columns, ',\n 'm/z values start from ', self.first_m_z,\n 'and end ', self.last_m_z)\n # Create a matrix with a shape of (number_of_VOCs X number_of_columns) filled with zeros.\n self.dataset = np.zeros((self.number_of_VOCs,\n self.number_of_columns))\n for line in range(1, len(f)):\n if int(float(f[line].strip().split(',')[0])) not in self.loaded_samples:\n self.loaded_samples.append(int(float(f[line].strip().split(',')[0])))\n for column in range(self.number_of_columns):\n self.dataset[line-1][column] = int(float(f[line].strip().split(',')[column]))", "def __load_csv(filename):\n fp = open(Parser.DATA_FOLDER_PATH + filename + '.csv', 'r')\n records = []\n for line in fp:\n items = line.strip().split(',')\n x, y, z = '0', '0', '0'\n if len(items) > 1:\n x = items[1]\n if len(items) > 2:\n y = items[2]\n if len(items) > 3:\n z = items[3]\n\n values = [x, y, z]\n records.append(values)\n\n # Discard some beginning data which may be noisy\n # del records[:int(len(records) / 30)]\n n = len(records)\n\n for i in range(n):\n rec = []\n # Consider X, Y, Z axes\n for k in range(3):\n # If can convert string to float\n try:\n val = float(records[i][k])\n except ValueError:\n val = 0\n rec.append(val)\n\n # Replace it\n records[i] = rec\n return records", "def read_data(filename):\n data = np.genfromtxt(filename, delimiter=',', dtype=str)\n X = data[1:,2:].astype(np.float)\n y = data[1:,0]\n y[y==label0]='0'\n y[y==label1]='1'\n y[y==label2]='2'\n y=y.astype(np.float)\n return X, y", "def get_data(self, csv_file):\n pass", "def get_data():\r\n data = pd.read_csv(FILE_PATH)\r\n # Replace 'Zero KM' by year 2022 assuming it's a new car\r\n data['Ano'] = data['Ano'].str.replace('Zero KM', '2021').replace('2022', '2021')\r\n data['Ano'] = data['Ano'].astype(int)\r\n data['Automático'] = data['Automático'].astype(int)\r\n return data", "def preprocess(self):\n df = pd.read_csv(self.input, sep=self.dataSeparator, index_col = 0)\n #ATTENTION: this 
processing assumes that the data is formatted in a way that header and index are automatically recognized. remove trailing commas/separators at first line of the file for this to be achieved\n if self.transposeMatrix:\n df = df.T\n\n filePrefix = self.input.split(\"/\")[-1].split(\".\")[\n 0] # split path by / to receive filename, split filename by . to receive filename without ending\n filename = self.output + filePrefix + \"_transposed.csv\"\n\n df.to_csv(filename)\n return filename", "def process_csv(self, user: User, csv_file):\n self.db_session.rollback()\n csv = pandas.read_csv(StringIO(csv_file.read().decode('utf-8')))\n missing_cols = [col_name for col_name in CSV_SENSOR_MAP.values() if col_name not in csv.columns.values]\n if missing_cols:\n raise OBDControllerError(f'CSV is missing the following columns: {\", \".join(missing_cols)}')\n\n csv = csv[CSV_SENSOR_MAP.values()]\n start_datetime = self._resolve_date_from_csv_row(csv.iloc[0])\n gen_session_id = str(start_datetime.timestamp()).replace('.', '')[:12]\n\n if self.db_session.query(OBDSession).filter(OBDSession.id == gen_session_id).first():\n return\n\n session = OBDSession.create(self.db_session, id=gen_session_id, user_id=user.id, date=start_datetime)\n _ = CarState.create_from_csv(self.db_session, session, csv)\n self.db_session.commit()", "def ReadMetrics( fileName ):\n DataDF=pd.read_csv(fileName,header=0,delimiter=',',parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n #print(DataDF.head())\n return( DataDF )", "def prepare_CSV(self):\n self.drop_columns()\n self.rename_columns()\n self.spilt_columns()\n self.add_vehicle_id_column()\n self.add_source_column()\n self.add_timestamp_columns()\n self.get_colour_columns()\n self.clean_column_formats()\n\n # print(self.data.info())\n # print(self.data.sample(10))\n\n return self.data", "def load_simulator_data(self, csvfname):\n data = []\n with open(csvfname, 'r') as csvfile:\n data_tmp = list(csv.reader(csvfile, delimiter=','))\n for row in data_tmp:\n x7 = [float(x) for x in row[7].split(':')]\n x8 = [float(x) for x in row[8].split(':')]\n\n data.append(((row[0], row[1], row[2]),\n np.array([float(row[3]), float(row[4]), float(row[5]), float(row[6])] + x7 + x8)))\n\n return data", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def load_metrics(fp):\r\n with open(fp) as csvfile:\r\n read = csv.reader(csvfile, delimiter=\",\", quotechar='\"')\r\n lst = []\r\n for i in read:\r\n new_row = i[0:2] + i[7:-1]\r\n lst.append(new_row)\r\n data = np.array(lst)\r\n return data", "def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data", "def parse(csvfilename):\r\n with 
open(csvfilename, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n #reader = csv.reader(f, delimiter=';', quotechar=\"'\")\r\n data = list(reader)\r\n # transform data into numpy array\r\n data = np.array(data).astype(float)\r\n return data", "def import_csv():\n data_location = (os.path.join(os.path.dirname(__file__), 'data/NYPD_Motor_Vehicle_Collisions.csv'))\n\n data_frame = pd.read_csv(data_location, dtype={\n \"NUMBER OF PERSONS INJURED\": int,\n \"NUMBER OF PERSONS KILLED\": int,\n \"BOROUGH\": str,\n \"ZIP CODE\": str,\n \"LATITUDE\": float,\n \"LONGITUDE\": float,\n })\n\n # Add score column set to 0.0\n data_frame['SCORE'] = pd.Series(0.0, index=data_frame.index)\n return data_frame", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def read_fx_data_from_file(self, fileName, formatSpec):\n dataR = pd.read_csv(fileName, index_col=1)\n dataR.index = pd.to_datetime(dataR.index, format=formatSpec)\n dataR.sort_index(inplace=True)\n label = dataR['Name'][0]\n dataR.drop('Name', axis=1, inplace=True)\n return dataR, label", "def read_test_csv(self, file_path, header=True):\n BasePredictor.read_test_csv(self, file_path, header)\n self.obs = np.array(self.obs, dtype=np.int32)\n return", "def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row", "def load_data(filename):\n #Admittedly copy-pasted from Heredity project cuz I'm resourceful like that\n #Makes 2 lists, one for evidence and one for labels\n evidence = []\n labels = []\n #Open csv file\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n #Iterate through user rows of file\n for row in reader:\n i = 0\n tmp_list = []\n for column in row:\n if i in [0,2,4,11,12,13,14]:\n column = int(column)\n if i in [1,3,5,6,7,8,9]:\n column = float(column)\n if i == 10:\n if column == \"Jan\":\n column = 0\n if column == \"Feb\":\n column = 1\n if column == \"Mar\":\n column = 2\n if column == \"Apr\":\n column = 3\n if column == \"May\":\n column = 4\n if column == \"June\":\n column = 5\n if column == \"Jul\":\n column = 6\n if column == \"Aug\":\n column = 7\n if column == \"Sep\":\n column = 8\n if column == \"Oct\":\n column = 9\n if column == \"Nov\":\n column = 10\n if column == \"Dec\":\n column = 11\n if i in [15,16]:\n if column == \"Returning_Visitor\" or column == \"TRUE\":\n column = 1\n else:\n column = 0\n if i == 17:\n if column == \"TRUE\":\n column = 1\n else:\n column = 0\n labels.append(column)\n else:\n tmp_list.append(column)\n i+=1\n evidence.append(tmp_list)\n \n return (evidence,labels)", "def data_transform(filename):\n gap = 1\n dirpath = tempfile.mkdtemp()\n pd_list = []\n file_df = pd.read_csv(filename, header = 0)\n for line in range(len(file_df)):\n if line % gap == 0:\n print(line,len(file_df))\n rna_uuid = file_df.iloc[line][\"rna_seq_uuid\"]\n case_uuid = file_df.iloc[line][\"case_uuid\"]\n try:\n df = pd.read_csv(download_rna_seq([rna_uuid], dirpath),sep=\"\\t\",names = 
['rna_id','value'])\n df = df.transpose()\n df.columns = df.iloc[0]\n df = df.drop(df.index[0])\n df[\"case_uuid\"] = str(case_uuid)\n pd_list.append(df.transpose())\n except:\n continue\n\n final_df = pd.concat(pd_list, axis=1, sort=False)\n final_df = final_df.transpose()\n\n return final_df", "def read_traffic_sensor_from_csv(path: str) -> pd.DataFrame:\n\n df = pd.read_csv(path)\n df[\"measuredTime\"] = pd.to_datetime(df[\"measuredTime\"])\n df.set_index(\"measuredTime\", inplace=True)\n return df", "def load_data(filename):\n \n labels = []\n evidence = []\n\n monthdict = {\n \"Jan\": 0, \"Feb\": 1, \"Mar\": 2, \"Apr\": 3, \"May\": 4, \"June\": 5, \"Jul\": 6,\n \"Aug\": 7, \"Sep\": 8, \"Oct\": 9, \"Nov\": 10, \"Dec\": 11\n }\n\n with open(\"shopping.csv\") as f:\n reader = csv.reader(f)\n next(reader)\n\n for row in reader:\n evidence.append(\n [int(row[0]), float(row[1]), int(row[2]), float(row[3]), int(row[4]), float(row[5])] +\n [float(e) for e in row[6:9]] + [monthdict[row[10]]] +\n [int(e) for e in row[11:14]] + [0 if row[15] == \"New_Visitor\" else 1] +\n [1 if row[16] == \"TRUE\" else 0]\n )\n\n labels.append(0 if row[17] == \"FALSE\" else 1)\n \n return (evidence, labels)", "def parseFile()-> None:\n logging.info(f\"Parsing file with Pandas {getTime()}\")\n with open(DATA_FILE) as f:\n data = pd.read_csv(f)\n db = connect(\"result.db\")\n\n data.to_sql(\"data\",db,if_exists=\"replace\")\n\n result = pd.DataFrame({\"Uniqe Countries\":[len(set(data[\"location\"]))]})\n\n with open(RESULT_FILE,\"w\") as f:\n f.write(result.to_csv(index=False))\n logging.info(f\"Finsied parsing {getTime()}\")", "def load_data(self, filename):\r\n #sqlcontext = SQLContext(self.sc)\r\n #df = sqlcontext.read.format('com.databricks.spark.csv').options(header='false', inferschema='true').load(filename)\r\n #df = sc.textFile(r\"C:\\Users\\mohan\\Downloads\\patches.csv\").map(lambda line: line.split(\",\"))\r\n #print (df.count())\r\n df = self.sc.textFile(filename).map(lambda line: line.split(\",\"))\r\n l = df.map(lambda w: [int(float(c)) for c in w]).zipWithIndex()\r\n return l\r\n raise NotImplementedError", "def load_data(filename):\n\n # Load data\n df = pd.read_csv(filename)\n\n # Create mappings of months and visitor types\n months = {\"Jan\": 0,\n \"Feb\": 1,\n \"Mar\": 3,\n \"Apr\": 4,\n \"May\": 5,\n \"June\": 6,\n \"Jul\": 7,\n \"Aug\": 8,\n \"Sep\": 9,\n \"Oct\": 10,\n \"Nov\": 11,\n \"Dec\": 12}\n\n visitors = {\"New_Visitor\": 0,\n \"Returning_Visitor\": 1,\n \"Other\": 0}\n\n # Get labels for individual datapoints\n labels = df['Revenue'].astype(int).tolist()\n\n # Create data points as formatted lists of values, floats get rounded to two decimals\n df['Administrative'] = df['Administrative'].astype(int)\n df['Administrative_Duration'] = df['Administrative_Duration'].astype(float).round(2)\n df['Informational'] = df['Informational'].astype(int).round(2)\n df['Informational_Duration'] = df['Informational_Duration'].astype(float).round(2)\n df['ProductRelated'] = df['ProductRelated'].astype(int)\n df['ProductRelated_Duration'] = df['ProductRelated_Duration'].astype(float).round(2)\n df['BounceRates'] = df['BounceRates'].astype(float).round(2)\n df['ExitRates'] = df['ExitRates'].astype(float).round(2)\n df['PageValues'] = df['PageValues'].astype(float).round(2)\n df['SpecialDay'] = df['SpecialDay'].astype(float).round(2)\n df['Month'] = df['Month'].map(months)\n df['OperatingSystems'] = df['OperatingSystems'].astype(int)\n df['Browser'] = df['Browser'].astype(int)\n df['Region'] = 
df['Region'].astype(int)\n df['TrafficType'] = df['TrafficType'].astype(int)\n df['VisitorType'] = df['VisitorType'].map(visitors)\n df['Weekend'] = df['Weekend'].astype(int)\n del df['Revenue']\n\n # Init result\n evidence = df.values.tolist()\n result = [evidence, labels]\n\n # Return a tuple (evidence, labels).\n return result", "def parse(self):\n self.isParsingNeeded = False\n localizations = StormReader(self.file_path)\n localizations.readfile()\n localizations.get_header_info()\n\n #array = stormfile(self.file_path)\n #array.getHeaderInfo()\n self.stormData = localizations.data\n\n #prevent negative x,y values. Set to Zero\n self.stormData[...,0] = self.stormData[...,0]-self.stormData[...,0].min()\n self.stormData[...,1] = self.stormData[...,1]-self.stormData[...,1].min()\n self.size = np.array([self.stormData[...,0].max(), self.stormData[...,1].max()])\n #Build structured array with title name and value of columns.\n storm_reshaped = np.negative(np.ones((self.stormData.shape[0], 6)))\n for i,j in enumerate(localizations.dataColumn):\n if j >=0:\n storm_reshaped[...,int(i)] = self.stormData[..., int(j)]\n #set precision to 10 nm if no value given\n if (storm_reshaped[...,2]<0).all():\n storm_reshaped[...,2] = 10\n self.stormData = storm_reshaped", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def load_test_data(test_file_path):\n lines = csv.reader(open(test_file_path, \"rb\"))\n \n unformatted_data_set = list(lines)\n \n # map the data to floats for calculation purposes\n formatted_data = [map(float, data_line) for data_line in unformatted_data_set]\n return formatted_data", "def load_data(csv_file):\n df = pd.read_csv(csv_file)\n col_index = list(df.columns.values)\n result_label = col_index[-1] # get label of the last column\n x = df.drop(columns=result_label, axis=1)\n y = df.iloc[:, -1]\n return x, y", "def load_data(filename):\n # create an evidence and label list\n evidence = []\n label = []\n\n # create a dictionary to hold key months matching to their respective values\n month = {'Jan': 0, 'Feb': 1, 'Mar': 2, 'Apr': 3, 'May': 4, 'June': 5, 'Jul': 6, 'Aug': 7, 'Sep': 8, 'Oct': 9,\n 'Nov': 10, 'Dec': 11}\n\n # open and read the csv file\n with open(filename) as data:\n # use the dictionary csv reader to be able to call the cell values by the csv column header names\n reader = csv.DictReader(data)\n # read each row in the csv and append the evidence and labels to their respective lists\n for row in reader:\n evidence.append([\n int(row[\"Administrative\"]),\n float(row[\"Administrative_Duration\"]),\n int(row[\"Informational\"]),\n float(row[\"Informational_Duration\"]),\n int(row[\"ProductRelated\"]),\n float(row[\"ProductRelated_Duration\"]),\n float(row[\"BounceRates\"]),\n float(row[\"ExitRates\"]),\n float(row[\"PageValues\"]),\n float(row[\"SpecialDay\"]),\n month[row[\"Month\"]],\n int(row[\"OperatingSystems\"]),\n int(row[\"Browser\"]),\n int(row[\"Region\"]),\n int(row[\"TrafficType\"]),\n 1 if row[\"VisitorType\"] == \"Returning_Visitor\" else 0,\n 1 if 
row[\"Weekend\"] == \"TRUE\" else 0,\n ])\n label.append(\n 1 if row['Revenue'] == 'TRUE' else 0\n )\n\n return evidence, label", "def open_MRI_data_var(csv_path, train_set = 0.8, normalize=True):\n data_df = pd.read_csv(csv_path)\n\n mri_col = data_df.columns.str.contains(\"SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16\")\n mri_col = data_df.columns[mri_col].values\n\n data_df = data_df.dropna(axis=0, subset=mri_col)\n\n # Select only the subjects with nfollowups\n # Code to only select 5 first appearances of each PTID\n ptid_list = np.unique(data_df[\"PTID\"])\n\n idx_to_drop = []\n data_final = data_df.drop(idx_to_drop)\n\n # Divide between test and train\n from sklearn.model_selection import GroupShuffleSplit\n gss = GroupShuffleSplit(n_splits=1, test_size=1.0-train_set)\n train_dataset, test_dataset = next(gss.split(X=data_final, y=data_final.DX_bl.values, groups=data_final.PTID.values))\n\n df_train = data_final.iloc[train_dataset]\n df_test = data_final.iloc[test_dataset]\n\n df_train = df_train.reset_index(drop=True)\n df_test = df_test.reset_index(drop=True)\n\n # Return the features in the correct shape list of Tensors (timesteps, nfeatures)\n X_train = pandas_to_data_timeseries_var(df_train, mri_col)\n X_test = pandas_to_data_timeseries_var(df_test, mri_col)\n\n return X_train, X_test", "def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def prepare_data(file_path: str):\n movie_industry_df = pd.read_csv(file_path, encoding='latin-1')\n return movie_industry_df", "def import_ag_data(data_csv):\n df = pd.read_csv(data_csv)\n col_to_drop = ['Program', 'Period', 'Week Ending', 'Geo Level', 'State',\n 'State ANSI', 'Zip Code', 'Region', 'watershed_code',\n 'Watershed', 'Data Item', 'Domain', 'Domain Category',\n 'Ag District', 'Ag District Code', 'CV (%)']\n df = df.drop(col_to_drop, axis=1)\n df = 
df[(df['Value'] != ' (D)') & (df['Value'] != ' (Z)')]\n df = df.replace(to_replace=r',', value='', regex=True)\n df['Value'] = df['Value'].astype('int')\n df = df.rename(columns={'Value': 'Yield'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n return df", "def post_process_data(input_file):\n data_list, header_list = Parser.__parse_csv_data(input_file)\n json_data = Parser.__read_column_index()\n Y = [json_data['output'][data[1]]['value'] for data in data_list]\n data_list = [d[3:] for d in data_list]\n X = []\n\n for i in range(len(data_list)):\n x = numpy.zeros(len(json_data['input']))\n x[json_data['input']['pre-tax amount']['column_index']] = data_list[i][3]\n x[json_data['input']['tax amount']['column_index']] = data_list[i][3]\n\n for j in range(len(data_list[i])):\n try:\n float(data_list[i][j])\n except ValueError:\n try:\n x[json_data['input'][data_list[i][j]]['column_index']] = 1\n except KeyError:\n pass\n X.append(x)\n return X, Y", "def import_data(file):\n df = pd.read_csv(file, parse_dates=True, keep_date_col=True, sep=';')\n df = reduce_mem_usage(df)\n return df", "def read_data(\n self, path: str = \"src/data/data_aspects_tokens.csv\"\n ) -> Tuple[list, list]:\n data = pd.read_csv(path)\n data = self.summarize_review(data)\n self.dataset = data\n x = data[\"review_polarity\"].to_list()\n y = data[\"true_label\"].astype(int)\n\n return x, y", "def load_csv(filename):\r\n dataset = list()\r\n with open(filename, 'r') as file:\r\n csv_reader = reader(file, delimiter='\\t')\r\n for row in csv_reader:\r\n if not row:\r\n continue\r\n dataset.append([float(i) for i in row])\r\n return dataset", "def new_csv_imp(infile):\r\n with open(infile, \"r\") as fd:\r\n txt = fd.readlines()\r\n if len(txt) > 1:\r\n if 'Serial' in txt[0]:\r\n print('{:} is Solinst'.format(infile))\r\n if 'UNIT: ' in txt[7]:\r\n level_units = str(txt[7])[5:].strip().lower()\r\n if 'UNIT: ' in txt[12]:\r\n temp_units = str(txt[12])[5:].strip().lower()\r\n f = pd.read_csv(infile, skiprows=13, parse_dates=[[0, 1]], usecols=[0, 1, 3, 4])\r\n print(f.columns)\r\n f['DateTime'] = pd.to_datetime(f['Date_Time'], errors='coerce')\r\n f.set_index('DateTime', inplace=True)\r\n f.drop('Date_Time', axis=1, inplace=True)\r\n f.rename(columns={'LEVEL': 'Level', 'TEMP': 'Temp'}, inplace=True)\r\n level = 'Level'\r\n temp = 'Temp'\r\n\r\n if level_units == \"feet\" or level_units == \"ft\":\r\n f[level] = pd.to_numeric(f[level])\r\n elif level_units == \"kpa\":\r\n f[level] = pd.to_numeric(f[level]) * 0.33456\r\n printmes(\"Units in kpa, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"mbar\":\r\n f[level] = pd.to_numeric(f[level]) * 0.0334552565551\r\n elif level_units == \"psi\":\r\n f[level] = pd.to_numeric(f[level]) * 2.306726\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"m\" or level_units == \"meters\":\r\n f[level] = pd.to_numeric(f[level]) * 3.28084\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n else:\r\n f[level] = pd.to_numeric(f[level])\r\n printmes(\"Unknown units, no conversion\")\r\n\r\n if temp_units == 'Deg C' or temp_units == u'\\N{DEGREE SIGN}' + u'C':\r\n f[temp] = f[temp]\r\n elif temp_units == 'Deg F' or temp_units == u'\\N{DEGREE SIGN}' + u'F':\r\n printmes('Temp in F, converting {:} to C...'.format(os.path.basename(infile)))\r\n f[temp] = (f[temp] - 32.0) * 5.0 / 9.0\r\n return f\r\n\r\n elif 'Date' in txt[1]:\r\n print('{:} is 
Global'.format(infile))\r\n f = pd.read_csv(infile, skiprows=1, parse_dates=[[0, 1]])\r\n # f = f.reset_index()\r\n f['DateTime'] = pd.to_datetime(f['Date_ Time'], errors='coerce')\r\n f = f[f.DateTime.notnull()]\r\n if ' Feet' in list(f.columns.values):\r\n f['Level'] = f[' Feet']\r\n f.drop([' Feet'], inplace=True, axis=1)\r\n elif 'Feet' in list(f.columns.values):\r\n f['Level'] = f['Feet']\r\n f.drop(['Feet'], inplace=True, axis=1)\r\n else:\r\n f['Level'] = f.iloc[:, 1]\r\n # Remove first and/or last measurements if the transducer was out of the water\r\n # f = dataendclean(f, 'Level')\r\n flist = f.columns.tolist()\r\n if ' Temp C' in flist:\r\n f['Temperature'] = f[' Temp C']\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp C', 'Temperature'], inplace=True, axis=1)\r\n elif ' Temp F' in flist:\r\n f['Temperature'] = (f[' Temp F'] - 32) * 5 / 9\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp F', 'Temperature'], inplace=True, axis=1)\r\n else:\r\n f['Temp'] = np.nan\r\n f.set_index(['DateTime'], inplace=True)\r\n f['date'] = f.index.to_julian_date().values\r\n f['datediff'] = f['date'].diff()\r\n f = f[f['datediff'] > 0]\r\n f = f[f['datediff'] < 1]\r\n # bse = int(pd.to_datetime(f.index).minute[0])\r\n # f = hourly_resample(f, bse)\r\n f.rename(columns={' Volts': 'Volts'}, inplace=True)\r\n f.drop([u'date', u'datediff', u'Date_ Time'], inplace=True, axis=1)\r\n return f\r\n else:\r\n print('{:} is unrecognized'.format(infile))", "def _read_samples(self):\n\n logging.debug(\"Start file parsing.\")\n data = pd.read_csv(self._source_file, header=None)\n \n data = pd.read_csv(self._source_file, header=None)\n header = pd.read_csv(self._header_file, delimiter=':', skiprows=1, header=None)\n header.columns = ['column', 'column_type']\n\n data.columns = header.column.tolist() + ['attack']\n data['attack'] = data['attack'].str.replace('.', '')\n data['label'] = 1\n data.loc[data['attack'] == 'normal', 'label'] = 0\n\n symbolic_columns = header.loc[header.column_type == ' symbolic.'].column.tolist()\n # print(symbolic_columns)\n\n for scol in symbolic_columns:\n data[scol] = pd.Categorical(data[scol])\n one_hot_cols = pd.get_dummies(data[scol], prefix=scol)\n data = pd.concat([data, one_hot_cols], axis=1)\n\n data = data.drop(columns=symbolic_columns)\n data = data.drop(columns=['attack'])\n\n # data.loc[data.attack != 'normal' , ['attack', 'label']].head(20)\n\n data_normal = data.loc[data['label'] == 0]\n data_abnormal = data.loc[data['label'] == 1]\n\n data_normal_train = data_normal.sample(frac=0.7)\n data_normal_test = data_normal.loc[~data_normal.index.isin(data_normal_train.index)]\n\n data_normal_train = data_normal_train.drop(columns=['label']).values\n data_normal_test = data_normal_test.drop(columns=['label']).values\n data_abnormal = data_abnormal.drop(columns=['label']).values\n \n scaler = MinMaxScaler()\n _ = scaler.fit(data_normal_train)\n data_normal_train = scaler.transform(data_normal_train)\n data_normal_test = scaler.transform(data_normal_test)\n data_abnormal = scaler.transform(data_abnormal)\n \n logging.debug('Normal {}; Train {}; Test{}'.format(data_normal.shape, data_normal_train.shape, data_normal_test.shape))\n logging.debug('Abnormal {}'.format(data_abnormal.shape))\n\n samples = {}\n samples['NORMAL'] = data_normal_train\n samples['NORMAL_TEST'] = data_normal_test\n samples['ABNORMAL_TEST'] = data_abnormal\n\n logging.debug(\"End file parsing.\")\n\n return samples", "def load_CSV_data(path):\n return np.genfromtxt(os.path.join('data/traffic_data', 
path))", "def load_utlization(path):\n df = pd.read_csv(f\"{raw_data}\\\\{path}\", parse_dates=[\"AdmissionDate\"])\n\n df.rename(\n columns={\"MemberID\": \"member_id\", \"LOSDays\": \"los\", \"FacilityName\": \"facility\"},\n inplace=True,\n )\n\n df.columns = clean_table_columns(df.columns)\n\n facility_col = [col for col in df.columns if \"facility\" in col][0]\n\n df = cognify_facility_changes(df, facility_col)\n\n df = df[df.member_id != 1003]\n return df", "def airline(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'airline.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Ecdat/Airline.csv'\n maybe_download_and_extract(path, url,\n save_file_name='airline.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def load_from_file_csv(cls):\n try:\n with open(cls.__name__ + \".csv\", \"r\") as f:\n ld = []\n reader = csv.DictReader(f)\n for row in reader:\n for key, val in row.items():\n row[key] = int(val)\n ld.append(row)\n return [cls.create(**item) for item in ld]\n except FileNotFoundError:\n return []", "def __init__(self, csv_path):\r\n # Transforms\r\n self.to_tensor = transforms.ToTensor()\r\n # Read the csv file\r\n self.data_info = pd.read_csv(csv_path, header=None)\r\n # First column contains the image paths\r\n self.image_arr = np.asarray(self.data_info.iloc[:, 0])\r\n # Second column is the labels\r\n self.label_arr = [np.asarray(self.data_info.iloc[:, 1])]\r\n # Third column is for an operation indicator\r\n #self.operation_arr = np.asarray(self.data_info.iloc[:, 2])\r\n # Calculate len\r\n self.data_len = len(self.data_info.index)", "def auto(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'auto.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/ISLR/Auto.csv'\n maybe_download_and_extract(path, url,\n save_file_name='auto.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata", "def process_data(filename, skiprow=0):\n df = pd.read_csv(filename, encoding='big5', header=None, skiprows=skiprow)\n # drop 測站\n df.drop(1, axis=1, inplace=True)\n print('Data Loaded, preview:')\n print(df.head())\n\n data = {}\n # group data by date\n for name, ddf in df.groupby(0):\n date = [s.zfill(2) for s in name.split('/')]\n month = date[1]\n\n # drop the date\n ddf.drop(0, axis=1, inplace=True)\n\n # set index as the measure\n ddf.set_index(2, drop=True, inplace=True)\n\n # set column as month-day-hour\n ddf.columns = ['-'.join(date[1:]+[str(i).zfill(2)]) for i in range(24)]\n\n # concatenate\n if month in data:\n data[month] = pd.concat([data[month], ddf], axis=1)\n else:\n data[month] = ddf\n\n # sort the columns by datetime\n for key in data.keys():\n data[key] = data[key][data[key].columns.sort_values()]\n\n print('\\nShow data index:')\n print(data['01'].columns)\n\n return data", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def read_csv_file(self):\n pass", "def 
load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def create_data_for_model(file_name):\n y_values = []\n files_seconds = []\n with open(file_name, 'r') as file:\n reader = csv.reader(file)\n for row in reader:\n #process all frames that have cats in them\n if int(row[3]) != 0:\n y_values.append(int(row[3]))\n files_seconds.append((row[1], float(row[2])))\n x_values = []\n for (filename, t) in files_seconds:\n cat = CatVideo(\"data/videos/\" + filename)\n frame = cat.get_frame_time(t)\n x_values.append(get_features_frame(frame))\n\n return np.array(x_values), np.array(y_values)", "def load_data(path_to_data, outcome):\n df = pd.read_csv(path_to_data)\n\n mean_temp = df['meanTempDegree'].values\n daily_temp = df['dailyTempCat'].values\n obs_mean = df['lnRr_' + outcome].values\n obs_std = df['se_' + outcome].values\n study_id 
= df['adm1'].values\n data_id = np.arange(df.shape[0])\n\n valid_id = ~(np.isnan(obs_std) |\n np.isnan(obs_mean) |\n np.isinf(obs_std) |\n np.isinf(obs_mean))\n\n mean_temp = mean_temp[valid_id]\n daily_temp = daily_temp[valid_id]\n obs_mean = obs_mean[valid_id]\n obs_std = obs_std[valid_id]\n study_id = study_id[valid_id]\n data_id = data_id[valid_id]\n\n return utils.TempData(mean_temp,\n daily_temp,\n obs_mean,\n obs_std,\n study_id,\n data_id)", "def data_for_file (self):\n rowdata = np.array([self.Train_Time_min,self.Train_Time_max,self.Train_Time_avg,\n self.Loss_Value_min,self.Loss_Value_max,self.Loss_Value_avg,\n self.Iterations_min,self.Iterations_max,self.Iterations_avg,\n self.precision_avg,self.recall_avg])\n return rowdata\n\n\n \n\n\n #### FUNCTION DEFINTIIONS ####", "def read_trajectory(self, data_name):\r\n # read in CSV\r\n data = pd.read_csv(data_name)\r\n\r\n # pull out columns\r\n times = data[\"Time\"].to_numpy()\r\n dist = data[\"Distance\"].to_numpy()\r\n\r\n # set the minimum time, for adding back at the end\r\n self.mint = np.min(times)\r\n times = times-self.mint\r\n\r\n # find the ending time for cutoffs\r\n self.end_time = times[-1]\r\n\r\n # create spline\r\n self.trajectory = UnivariateSpline(times, dist)", "def process_csv(self):\n with open(self.filepath, mode=\"r\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n header = next(csv_reader)\n\n date_idx = self._get_header_position(header, \"Label\")\n power_idx = self._get_header_position(header, \"kW\")\n\n if self.inverter_id not in header[power_idx]:\n raise Exception(\"Inverter data returned for the incorrect meter.\")\n\n for row in csv_reader:\n date_obj = self.csv_str_to_date(row[date_idx])\n power = float(row[power_idx] or 0)\n\n current_date = self.date_to_final_str(date_obj)\n rounded_time = self.round_up_to_quarter_hour(date_obj)\n\n if current_date not in self.intermediate_readings:\n self.intermediate_readings[\n current_date\n ] = self.build_intermediate_dict()\n\n current_reading = self.intermediate_readings[current_date][rounded_time]\n # Here's where we sum power readings together - in to fifteen min intervals\n self.intermediate_readings[current_date][rounded_time] = (\n current_reading + power\n )\n\n actual_time = self.date_to_intermediate_time_str(date_obj)\n if rounded_time == actual_time:\n # Here's where we average power readings together, in fifteen minute intervals\n self.intermediate_readings[current_date][rounded_time] = round(\n float(\n self.intermediate_readings[current_date][rounded_time] / 3\n ),\n 2,\n )\n\n return self.finalize_readings()", "def import_data(address):\n try:\n inputcsv = csv.reader(open(address, \"r\"), delimiter=\";\", lineterminator=\"\\n\")\n except IOError:\n print \"File not exists or is unreadable, please check it.\"\n exit(1)\n\n data = list() # all data\n item = list() # each tabular\n count = 0\n subcount = 0\n try:\n for row in inputcsv:\n if count < 2 : # read Time period and number of product\n data.append(int(row[1]))\n else :\n item.append(row[1:])\n subcount +=1 \n if subcount == data[1]:\n data.append(np.array(item, dtype=float))\n item = list()\n subcount = 0\n count += 1\n if (data[1] > 1):\n data.append(np.array(item, dtype=float)) # manage the last tabular\n except:\n print \"File is not well formated, please correct it.\"\n exit(1)\n return data", "def load_from_csv(self):\n\n self._logger.info('Reading data coming from CSV files')\n\n sta = self.stations\n\n if sta != None:\n msta = \", \".join(sta)\n 
self._logger.debug('Using only stations {0}'.format(msta))\n\n # load the data\n v = list(self.variables)\n v.append('metadata')\n for i in v:\n if i in self.dataConfig:\n\n self._logger.debug('Reading %s...' % self.dataConfig[i])\n if i == 'metadata':\n dp_final = pd.read_csv(self.dataConfig[i],\n index_col='primary_id')\n #Ensure all stations are all caps.\n dp_final.index = [s.upper() for s in dp_final.index]\n\n elif self.dataConfig[i]:\n dp_full = pd.read_csv(self.dataConfig[i],\n index_col='date_time',\n parse_dates=[0])\n dp_full.columns = [s.upper() for s in dp_full.columns]\n\n if sta is not None:\n\n data_sta = dp_full.columns.str.upper()\n\n # Grab IDs from user list thats also in Data\n self.stations = [s for s in data_sta if s in sta]\n dp = dp_full[dp_full.columns[(data_sta).isin(sta)]]\n\n else:\n dp = dp_full\n\n # Only get the desired dates\n dp_final = dp[self.start_date:self.end_date]\n\n if dp_final.empty:\n raise Exception(\"No CSV data found for {0}\"\n \"\".format(i))\n\n setattr(self, i, dp_final)" ]
[ "0.6448471", "0.64109224", "0.63845986", "0.6249481", "0.6204732", "0.6172775", "0.6129716", "0.61131644", "0.6100166", "0.6092652", "0.6074208", "0.60713947", "0.6030789", "0.6018793", "0.60126376", "0.59962356", "0.59944284", "0.5981371", "0.5959057", "0.5953626", "0.5935629", "0.59288085", "0.59174377", "0.5909255", "0.5908701", "0.5895557", "0.58953226", "0.5889651", "0.58877486", "0.5885227", "0.58678806", "0.5866583", "0.5859601", "0.5850934", "0.58289814", "0.5825868", "0.5814124", "0.58061767", "0.5803171", "0.5803138", "0.58030546", "0.57874906", "0.5785255", "0.57802635", "0.57704127", "0.57673395", "0.57669115", "0.57605845", "0.5751955", "0.5749917", "0.5749917", "0.5749917", "0.574946", "0.5738533", "0.57246727", "0.57082176", "0.57039845", "0.5699914", "0.5699471", "0.56953394", "0.56925535", "0.5690434", "0.5686234", "0.5675985", "0.56728435", "0.5667258", "0.56662995", "0.5657921", "0.5656449", "0.5654448", "0.5647855", "0.5647813", "0.5640673", "0.56361264", "0.5634934", "0.563306", "0.56297207", "0.56288844", "0.56283647", "0.56266457", "0.56251585", "0.56228447", "0.5620444", "0.56182337", "0.5614908", "0.5614831", "0.5614762", "0.56139964", "0.5605074", "0.5604128", "0.5596904", "0.55968255", "0.5593609", "0.55917716", "0.55909586", "0.55904126", "0.557863", "0.5574683", "0.55672073", "0.55655026", "0.55631524" ]
0.0
-1
Visualize a particular column of Y_pred and Y_test for a particular series
def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):
    
    ser_idx = [i for i in range(0, len(y_test), num_win_ser)]
    if num_plots > len(ser_idx):
        print("Too many plots, reduce the number")
    else:
        indx = ser_idx[0:num_plots]
        days = range(num_win_ser)
        for idx in indx:
            CR = x_test[idx][0][3]
            #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]
            true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]
            
            plt.title("Y_True, CR: "+ str(CR))
            plt.xlabel('Days')
            plt.ylabel(cols_y[col_idx])
            
            #plt.plot(days, pred, label = 'Pred')
            plt.plot(days, true, label = 'True')
            
            plt.legend()
            plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = test_seq[idx][0][0][3]\n pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True V/S Y_Pred, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def actual_pred_plot(preds):\r\n actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])\r\n actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]\r\n actual_pred['prediction'] = preds[:, -1]\r\n\r\n from keras.metrics import MeanSquaredError\r\n m = MeanSquaredError()\r\n m.update_state(np.array(actual_pred['Cost']), np.array(actual_pred['prediction']))\r\n\r\n return m.result().numpy(), actual_pred.plot()", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()", "def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig", "def test_sarima_model(y, y_test, results, **kwargs):\n \n # Get predictions\n pred = results.get_prediction(start=y_test.index.min(), end=y_test.index.max(), **kwargs)\n y_pred = pred.predicted_mean\n pred_ci = pred.conf_int()\n\n # Calculate some metrics and print them out\n rmse = ((y_pred - y_test) ** 2).mean() ** 0.5\n print('Root Mean Squared Error =', rmse)\n \n r2 = r2_score(y_pred, y_test)\n print('R^2 =', r2)\n \n # Graph\n ax = y.plot(label='observed')\n y_pred.plot(ax=ax, label='predicted', alpha=.7, figsize=(15, 8))\n ax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n plt.title('Average Monthly Temperature: Observed vs. 
Predicted')\n ax.set_xlabel('Date')\n ax.set_ylabel('Temperature')\n plt.legend()\n plt.show()", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def visualize_test(test_data_full, test_data, thetas):\n fig, ax = plt.subplots()\n ax.scatter(test_data_full[\"Weight\"], test_data_full[\"Height\"], color='blue')\n ax.plot(test_data_full[\"Weight\"], predict(test_data, thetas[-1]), color='red', linewidth=2)\n return fig", "def plot_pred(y, yhat, name, output_dir):\n ax = pd.DataFrame(y, columns=[\"y%s\" % LOOK_AHEAD]).plot(figsize=(15, 10))\n pd.DataFrame(yhat, columns=[\"yhat%s\" % LOOK_AHEAD]).plot(ax=ax)\n plt.title(\"%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}.png\")\n\n pd.DataFrame(y-yhat, columns=[f\"yhat {LOOK_AHEAD}\"]).plot(figsize=(15, 10))\n plt.title(\"diff-%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}-diff.png\")", "def plot_scatter(df):\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values\": fig})\n\n # Poor Results\n df = df.query(\"mae > 2\")\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values [mae > 2]\": fig})", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def plot_results(actual_time_series, predicted_values, len_train_data,\n y_name='Parameter'):\n\n plt.plot(np.arange(0, len(actual_time_series)),\n actual_time_series, label='Actual values', c='green')\n plt.plot(np.arange(len_train_data, len_train_data + len(predicted_values)),\n predicted_values, label='Predicted', c='blue')\n # Plot black line which divide our array into train and test\n plt.plot([len_train_data, len_train_data],\n [min(actual_time_series), max(actual_time_series)], c='black',\n linewidth=1)\n plt.ylabel(y_name, fontsize=15)\n plt.xlabel('Time index', fontsize=15)\n plt.legend(fontsize=15)\n plt.grid()\n plt.show()", "def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == 
y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. 
Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))", "def _graph_results(self, X_test, y_test, y_pred):\n if self.regression is None:\n print(\"Regression results aren't available. Have you run linear_regression() yet?\")\n return\n\n if self.attributes.shape[1] > 1:\n print(\"Graphing is supported for one feature only.\")\n return\n\n plt.scatter(X_test, y_test, color=\"black\")\n plt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n plt.xticks(())\n plt.yticks(())\n plt.show()", "def evaluate_random_forest(y_test, y_pred):", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def rmse(y_true, y_pred): # -> Any:\n ...", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def _plot_good_pred_whitout_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1]):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if test.labels[idx] == self.preds[idx, idx_preds]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n misclassified = [idx for idx in range(self.preds.shape[0]) if idx not in goodclassified_index]\r\n if misclassified:\r\n ax.scatter(test.features[misclassified, 0], test.features[misclassified, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def 
_plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")", "def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction", "def plot_stats(x_axis, y_axis, df, highlight=[]):\n a, b = df[x_axis], df[y_axis]\n\n X_train, X_test, y_train, y_test = train_test_split(a, b, test_size=0.33, random_state=42)\n\n X_train = np.array(X_train).reshape(-1, 1)\n X_test = np.array(X_test).reshape(-1, 1)\n y_train = np.array(y_train).reshape(-1, 1)\n y_test = np.array(y_test).reshape(-1, 1)\n\n regr = linear_model.LinearRegression()\n\n regr.fit(X_train, y_train)\n\n df[y_axis + \" STD\"] = df[y_axis].apply(lambda a: round((a-df[y_axis].mean())/df[y_axis].std()))\n df[y_axis + \" rank\"] = df[y_axis].rank(ascending=False)\n df[x_axis + \" rank\"] = df[x_axis].rank(ascending=False)\n \n mapper = linear_cmap(field_name=y_axis + \" STD\", palette=brewer[\"RdBu\"][len(df[y_axis + \" STD\"].unique())], \n low=min(df[y_axis + \" STD\"].unique()), high=max(df[y_axis + \" STD\"].unique()))\n \n source = ColumnDataSource(df)\n source2 = ColumnDataSource(df[df[\"Player\"].isin(highlight)])\n \n p = figure(x_range=(df[x_axis].min() - df[x_axis].std(), df[x_axis].max() + df[x_axis].std()), \n y_range=(df[y_axis].min() - df[y_axis].std(), df[y_axis].max() + df[y_axis].std()))\n \n r1 = p.circle(x=x_axis, y=y_axis,\n source=source, size=10, color=mapper, line_color=\"black\", legend_group= y_axis + \" STD\")\n\n p.title.text = y_axis + \" vs. \" + x_axis\n p.title.align = \"center\"\n p.xaxis.axis_label = x_axis\n p.yaxis.axis_label = y_axis\n p.legend.location = 'top_left'\n p.legend.title = \"St. 
Dev's from Avg \" + y_axis\n p.background_fill_color = \"#dddddd\"\n p.background_fill_alpha = 0.1\n \n line_x = [df[x_axis].min().item() - df[x_axis].std().item(), df[x_axis].max().item() + df[x_axis].std().item()]\n line_y = [(line_x[0]*regr.coef_.item()) + regr.intercept_.item(), (line_x[1]*regr.coef_.item()) + regr.intercept_.item()]\n r2 = p.line(line_x, line_y, line_width=2, color=\"black\")\n\n p.add_tools(HoverTool(renderers=[r1], tooltips=[\n (\"Player\", \"@Player\"),\n (y_axis, \"@{\" + y_axis +\"}{0.000}\"),\n (y_axis + \" Rank\", \"#@{\" + y_axis + \" rank}\"),\n (x_axis, \"@{\" + x_axis +\"}{0}\"),\n (x_axis + \" Rank\", \"#@{\" + x_axis + \" rank}\")]))\n\n \n p.add_tools(HoverTool(renderers=[r2], \n tooltips=[(x_axis, \"$x{0000}\"),\n (\"Predicted \" + y_axis, \"$y\")]))\n \n labels = LabelSet(x=x_axis, \n y=y_axis, text=\"Player\", y_offset=8,\n text_font_size=\"11px\", text_color=\"#555555\",\n source=source2, text_align='center')\n \n p.add_layout(labels)\n\n st.bokeh_chart(p)", "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def test(model, X_test, y_test):\n pred, loss = model(X_test, y_test)\n test_pred = np.argmax(pred, axis=1) \n acc = np.mean(np.argwhere(y_test==1)[:,1]==test_pred) \n\n print(\"Test acc is:\\n\", acc) \n return test\n raise NotImplementedError(\"Test method not implemented\")", "def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c", "def plot_prediction(test_YY, predict_age_month):\n\n\t# PLot-actual vs predicted age from test image\n\tfig, ax = plt.subplots(figsize = (7,7))\n\n\tplt.plot(test_YY, predict_age_month, 'ro')\n\n\tax.plot(test_YY, predict_age_month, 'r.',\n\t\t\t\t\tlabel = 'predictions (xception)-test image')\n\n\tax.plot(test_YY, test_YY, 'b-',\n\t\t\t\t\t\t\t\tlabel = 'actual-test image')\n\n\tax.legend(loc = 'upper right')\n\tax.set_xlabel('Actual Age (Months)')\n\tax.set_ylabel('Predicted Age (Months)')\n\tplt.show()", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def evaluate(model, df_result, label='test'):\n\n y_true = df_result['RUL']\n y_hat = df_result['y_hat']\n df_result['breakdown'].replace(0, False, inplace=True) # rsf only takes true or false\n df_result['breakdown'].replace(1, True, inplace=True) # rsf only takes true or false\n\n mse = mean_squared_error(y_true, y_hat)\n rmse = np.sqrt(mse)\n variance = r2_score(y_true, y_hat)\n\n # the concordance index (CI) is interested on the order of the predictions, not the predictions themselves\n # CI can only be measured between individual samples where a censoring or failure event occurred\n # https://medium.com/analytics-vidhya/concordance-index-72298c11eac7#:~:text=The%20concordance%20index%20or%20c,this%20definition%20mean%20in%20practice\n df_result_grouped = df_result.groupby('unit num').last()\n breakdown = 
df_result_grouped['breakdown']\n y_true = df_result_grouped['RUL']\n y_hat = df_result_grouped['y_hat']\n ci_sk = ci_scikit(breakdown, y_true, y_hat)[0]\n score = nasaScore(y_true, y_hat) # score should be based on the last instance\n # print(f'Number of concordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[1]}')\n # print(f'Number of discordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[2]}')\n # print(f'Number of pairs having tied estimated risks (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[3]}')\n # print(f'Number of comparable pairs sharing the same time (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[4]}')\n print('{} set RMSE:{:.2f}, Score:{:.2f}, CI(scikit):{:.4f}, R2:{:.2f}'.format(label, rmse, score, ci_sk, variance))\n result = [model, label, rmse, score, ci_sk, variance]\n return result", "def residual_vs_actual(\n y_true: ArrayLike | str,\n y_pred: ArrayLike | str,\n df: pd.DataFrame | None = None,\n ax: plt.Axes | None = None,\n xlabel: str = r\"Actual value\",\n ylabel: str = r\"Residual ($y_\\mathrm{true} - y_\\mathrm{pred}$)\",\n **kwargs: Any,\n) -> plt.Axes:\n y_true, y_pred = df_to_arrays(df, y_true, y_pred)\n assert isinstance(y_true, np.ndarray)\n assert isinstance(y_pred, np.ndarray)\n ax = ax or plt.gca()\n\n y_err = y_true - y_pred\n\n ax.plot(y_true, y_err, \"o\", alpha=0.5, label=None, mew=1.2, ms=5.2, **kwargs)\n ax.axline(\n [1, 0], [2, 0], linestyle=\"dashed\", color=\"black\", alpha=0.5, label=\"ideal\"\n )\n\n ax.set(xlabel=xlabel, ylabel=ylabel)\n ax.legend(loc=\"lower right\")\n\n return ax", "def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n return compute_rmse(y_pred, y_test)", "def score(self, y_true, y_pred):\r\n pass", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def make_predictions(model, x_test, y_test):\r\n preds = model.predict(x_test)\r\n y_hat = np.argmax(preds, axis=-1)\r\n print(type(y_test))\r\n y_test.columns = [0, 1]\r\n y = y_test.idxmax(axis=1)\r\n print(y_hat.shape)\r\n print(y.shape)\r\n return y_hat, y", "def check_model_performances(X,Y, model,show=False):\n #model.fit(X, Y)\n predictions = model.predict(X)\n \n predictions = predictions#.reshape(-1,1)\n \n # ######## Computes MSE ####### \n MSE = mean_squared_error(Y, predictions)\n print(f'\\nMSE : {MSE}')\n \n # ######## Computes R2 ####### \n R2 = r2_score(Y, predictions)\n print(f'R2 : {R2}')\n \n # ######## Plot Model predictions vs. 
target ####### \n if show:\n fig = go.Figure()\n \n fig.add_trace(go.Scatter(y=Y,\n mode='lines',\n name='target'))\n fig.add_trace(go.Scatter(y=predictions\n ,\n mode='lines',\n name='predictions'))\n \n fig.show()", "def tpr(y_true, y_pred):\n return recall(y_true, y_pred)", "def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n\n for i, column in enumerate(category_names):\n y_true = Y_test.values[:, i]\n y_pred = Y_pred[:, i]\n target_names = ['not {}'.format(column), '{}'.format(column)]\n print(classification_report(\n y_true, y_pred, target_names=target_names))", "def score(y_values):\n y_act = y_values[:,0]\n y_pred = y_values[:,1]\n return (y_act==y_pred).mean()*100", "def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")", "def eva_regress(y_true, y_pred):\n\n mape = MAPE(y_true, y_pred)\n vs = metrics.explained_variance_score(y_true, y_pred)\n mae = metrics.mean_absolute_error(y_true, y_pred)\n mse = metrics.mean_squared_error(y_true, y_pred)\n r2 = metrics.r2_score(y_true, y_pred)\n print('explained_variance_score:%f' % vs)\n print('mape:%f%%' % mape)\n print('mae:%f' % mae)\n print('mse:%f' % mse)\n print('rmse:%f' % np.sqrt(mse))\n print('r2:%f' % r2)", "def _evaluate(self, y_true, y_pred):\n pass", "def mse(y_pred, y):\n return np.mean((y - y_pred)**2)", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, 
target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def eval_prediction(df, print_all_wrong=False):\n incorrect_df = df.loc[~df['prediction_correct']].copy()\n print(f'Overall correctness: {(1 - len(incorrect_df) / len(df)) * 100:5.2f} %')\n\n print('\\nCorrectness per category:')\n\n def fm(x):\n series = pd.Series(data=len(x.loc[x['prediction_correct']]) / len(x),\n index=['corr %'])\n\n res = df.groupby('true_label').apply(fm)\n print(res)\n\n print('\\nHighest confidence for wrong predictions per category:')\n res = incorrect_df \\\n .loc[:, ['predicted_label', 'true_label', 'confidence']] \\\n .groupby(['predicted_label', 'true_label']) \\\n .max()\n print(res)\n\n # show all confidence distribution for wrong predictions\n if print_all_wrong:\n print('\\nConfidence distribution for wrong predictions:')\n sorted_df = incorrect_df \\\n .sort_values(by='confidence', ascending=False)\n sorted_df.reset_index(inplace=True, drop=True)\n\n for row in sorted_df.itertuples():\n s = f'{str(row[0]).rjust(4)} '\n s += ', '.join([f'{row[idx]:5.9f}' for idx in range(1, 6)])\n s += f', {row.predicted_label}, {row.true_label}, {row.max_confidence:5.9f}'\n print(s)", "def visualize_test_results(X, y, pred, signnames):\n assert(X.shape[0] == 14)\n nrows = 2\n ncols = 7\n nlabels = 43\n fig, axes = plt.subplots(nrows = 2 * nrows, ncols = ncols, figsize = (10, 10))\n for i in range(nrows):\n for j in range(ncols):\n aximg = axes[2*i, j]\n axprobs = axes[2*i + 1, j]\n idx = i*ncols + j\n\n img = X[idx]\n aximg.imshow(img)\n aximg.set_axis_off()\n\n probs = pred[idx]\n label = y[idx]\n colors = probs.shape[0] * [\"red\"]\n colors[label] = \"green\"\n\n n_top = 5\n topindices = sorted(np.arange(probs.shape[0]), key = lambda i: probs[i])[-n_top:]\n topprobs = probs[topindices]\n topcolors = [colors[i] for i in topindices]\n ypos = np.arange(n_top)\n axprobs.barh(ypos, topprobs, color = topcolors)\n axprobs.set_yticks(ypos)\n for ypos, l in zip(ypos, topindices):\n axprobs.text(0.025, ypos, textwrap.fill(signnames[l], 20), fontsize = 6)\n axprobs.set_axis_off()\n fig.savefig(os.path.join(img_dir, \"test_results.png\"))", "def investigate_data(training_data):\n return sns.pairplot(training_data.sample(100), hue=\"status\")", "def test_y(self):\n g = gca()\n lines = g.get_lines() \n self.assertEqual(lines[0].get_ydata().tolist(), [3, 3, 1, 1, 3])", "def plot_predictions(self):\n\n plt.title(\"Targets vs. 
Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def get_ytrue_ypred(model, datagen): \n y_true = np.array([])\n\n for i in range(len(datagen)):\n y_true = np.append(y_true, datagen[i][1])\n \n y_pred = model.predict(datagen)\n \n y_true1 = y_true + 15\n y_pred1 = (y_pred +15).reshape(-1)\n true_pred_df = pd.DataFrame({'y_true':y_true1, 'y_pred':y_pred1})\n true_pred_df['mae'] = np.abs(true_pred_df.y_true - true_pred_df.y_pred)\n \n return y_true1, y_pred1, true_pred_df", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def plot_y_test_means(y_test_means, out_dir, response_name, interactive_run=True):\n plt.rcParams['svg.fonttype'] = 'none'\n x_label = 'Optimization Step'\n y_label = f'Mean {response_name} in Test Set'\n\n plt.plot([_ for _ in range(len(y_test_means))], y_test_means,\n marker='s', markerfacecolor='m', markeredgecolor='black', \n c='m', markersize=0.1,\n markeredgewidth=0.01)\n plt.xticks(fontsize=24)\n plt.yticks(fontsize=24)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n if interactive_run:\n plt.show()\n else:\n if not isdir(out_dir):\n mkdir(out_dir)\n out_fpath = join(out_dir, 'y_test_means-plot.svg')\n print(f'Saving to {out_fpath}')\n plt.savefig(out_fpath)\n plt.clf()", "def test(self, X, y):\n\t\tself.test_X = X\n\t\tself.test_y = y\n\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = classifier.predict(X) \t\t\t# class prediction\n\t\ty_prob = classifier.predict_proba(X)\t# probability of each class\n\t\tself.test_metrics = ModelMetrics(classifier, y, y_pred, y_prob, 'holdout')", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def _plot_good_pred_whit_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][idx_preds] == test.labels[idx] and \\\r\n self.preds[idx][1][idx_preds] != self.preds[idx][1][idx_preds + 1]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][-1] == test.labels[idx]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(self.preds.shape[1])))\r\n goodclassified_index += new_good_index\r\n reject_idx, misclassified_idx = ([], [])\r\n for idx in range(self.preds.shape[0]):\r\n if idx not in goodclassified_index:\r\n reject = False\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n if self.preds[idx][1][idx_preds] == self.preds[idx][1][idx_preds + 
1]:\r\n reject_idx.append(idx)\r\n reject = True\r\n break\r\n if not reject:\r\n misclassified_idx.append(idx)\r\n if reject_idx:\r\n ax.scatter(test.features[reject_idx, 0], self.features[reject_idx, 1],\r\n label='Reject', c='orange', marker='^')\r\n if misclassified_idx:\r\n ax.scatter(test.features[misclassified_idx, 0], self.features[misclassified_idx, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def rmse(y_true, y_pred):\n return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def eval_perf_test(model, X_test, y_test):\n\n y_hat_test = model.predict(X_test)\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean 
Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def plot_preds(\r\n training_data: np.mat,\r\n predictions: np.ndarray,\r\n col_x: np.ndarray,\r\n col_y: np.ndarray,\r\n cola_name: str,\r\n colb_name: str,\r\n) -> plt.plot:\r\n xsort = training_data.copy()\r\n xsort.sort(axis=0)\r\n plt.scatter(col_x, col_y, color=\"blue\")\r\n plt.plot(\r\n xsort[:, 1],\r\n predictions[training_data[:, 1].argsort(0)],\r\n color=\"yellow\",\r\n linewidth=5,\r\n )\r\n plt.title(\"Local Weighted Regression\")\r\n plt.xlabel(cola_name)\r\n plt.ylabel(colb_name)\r\n plt.show()", "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def evaluate(self, X_test, y_test):\n \n y_pred = self.pipeline.predict(X_test)\n test_rmse = compute_rmse(y_pred, y_test)\n print(\"test rmse:\", test_rmse)\n return test_rmse", "def y_test_transformed(self):\n return self.test_transformed[self.target_param]", "def deviance_plot(est, X_test, y_test, ax=None, label='',train_color='#2c7bb6', test_color='#d7191c', alpha=1.0):\r\n\ttest_dev = np.empty(n_estimators) #创建数组\r\n\tfor i, pred in enumerate(est.staged_predict(X_test)):\r\n\t\ttest_dev[i] = est.loss_(y_test, pred)\r\n\tif ax is None:\r\n\t\tfig = plt.figure(figsize=(8,5))\r\n\t\tax = plt.gca();\r\n\tax.plot(np.arange(n_estimators)+1, test_dev, color=test_color, label='Test Error max_depth=1 %s' % label, linewidth=2, alpha=alpha)\r\n\tax.plot(np.arange(n_estimators)+1, est.train_score_, color=train_color, label='Train Error max_depth=1 %s' % label, linewidth=2, alpha=alpha)\r\n\tax.set_ylabel('Error')\r\n\tax.set_xlabel('n_estimators')\r\n\tax.set_ylim((0,2))\r\n\treturn test_dev, ax", "def rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n Y_pred = pd.DataFrame(model.predict(X_test))\n Y_pred.columns = category_names\n Y_test = pd.DataFrame(Y_test)\n Y_test.columns = category_names\n\n for column in category_names:\n print('** {} **'.format(column).upper())\n print(classification_report(Y_test[column], Y_pred[column]))", "def plot_predictions(y, yhat, title=\"Predictions vs Actual\", output_dir=None):\n\n fig = plt.figure(figsize=(15, 6))\n plt.xlabel('Time')\n plt.ylabel('PM10')\n plt.plot(y, label=\"actual\", figure=fig)\n plt.plot(yhat, label=\"predicted\", figure=fig)\n plt.title(title)\n fig.legend()\n\n if output_dir != None:\n plt.savefig(os.path.join(output_dir, \"{}.png\".format(title)))\n\n plt.close(fig)", "def evaluate(self, X_test, y_test):\n pipeline = run()\n y_pred = pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n print(rmse)\n return rmse", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def predict(x_train, y_train, x_test, y_test, fn, params):\n y_train_predicted = fn(x_train, None, *params)\n y_train_predicted = (y_train_predicted >= 0.5) * 1\n y_test_predicted = fn(x_test, None, *params)\n y_test_predicted = (y_test_predicted >= 0.5) * 1\n\n train_acc = np.sum(y_train_predicted == y_train) / x_train.shape[0]\n 
test_acc = np.sum(y_test_predicted == y_test) / x_test.shape[0]\n print('train accuracy =', train_acc)\n print('test accuracy =', test_acc)\n scatter_plot(x_train, y_train_predicted, x_test, y_test_predicted, 'predicted 0', 'predicted 1')", "def show_predictions(model, test_set, val_set, image_guess, img_res, data='OSNR', GRAY=True):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Defines figure dimensions\n fig = plt.figure(figsize=(20,30))\n\n ## Begins loop to find correct predictions and relay results to user\n ## Searches through the prediction array and compares it to the actual array.\n ## Displays image with the prediction and answer on the title\n for i in range(image_guess):\n correct = False\n actual = np.argmax(val_set[i])\n\n if predict[i] == actual:\n correctly_guessed += 1\n correct = True\n\n plt.subplot(6,3,i+1)\n fig.subplots_adjust(left=0.01,\n right=0.7,\n bottom=0.1,\n top=1.2,\n wspace=0.5,\n hspace=0.2\n )\n if GRAY == False:\n plt.imshow(test_set[i].reshape(img_res,img_res,3))\n else:\n plt.imshow(test_set[i].reshape(img_res,img_res), cmap='gray')\n\n if correct == True:\n if data == 'disp':\n plt.title('Correct! \\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('Correct! \\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('Correct! \\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n \n \n else:\n if data == 'disp':\n plt.title('\\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('\\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('\\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n\n ## Returns amount of predictions that were correct\n print('Correctly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (image_guess-correctly_guessed))", "def get_score(y_true, y_pred):\n scores = []\n for i in tqdm_notebook(range(len(y_true))):\n score,_ = get_score_summary(y_true[i], y_pred[i])\n scores.append(score)\n return np.array(scores)", "def predictions_relevance(self):\n return [\"Support Vector Regression predictions comparison\", super().truncate_predictions_relevance(self.datasetManager.X_test, self.datasetManager.y_test, self.y_pred)]", "def plot_good_pred(self, test: Set, title=None, fig_size=None, reject=False):\r\n if reject:\r\n self._plot_good_pred_whit_reject(test, title, fig_size)\r\n else:\r\n self._plot_good_pred_whitout_reject(test, title, fig_size)", "def regression_evaluation(self, test_set, predicted_values):\r\n\r\n MAE = self.mean_absolute_error(test_set, predicted_values)\r\n MSE = self.mean_square_error(test_set, predicted_values)\r\n print(f\"Mean Percent Error:\\t{MAE:.2f}\")\r\n print(f\"Mean Square Error:\\t{MSE:.2f}\")", "def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):\n\n scores = pd.DataFrame(columns=[\"Model\", \"MAE\", \"MSE\", \"R2\"])\n\n for modelname, pipeline in pipelines.items():\n pipeline.fit(X_train, 
y_train)\n y_pred = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n r2 = r2_score(y_test, y_pred)\n scores = scores.append(\n {\"Model\": modelname, \"MAE\": mae, \"MSE\": mse, \"R2\": r2}, ignore_index=True\n )\n\n for metric in [\"MAE\", \"MSE\", \"R2\"]:\n ax = sns.barplot(x=\"Model\", y=metric, data=scores)\n ax.set_ylim(bottom=0)\n plt.title(\"Test data: \" + metric)\n plt.show()", "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def avg_response(df, x, y_obs, y_est, save=False, show=True):\n\n fig, ax1 = plt.subplots(figsize=(15,15))\n\n ax2 = ax1.twinx()\n\n x_name = x\n if df[x].dtype == \"int\":\n x = df[x].astype(\"category\")\n elif df[x].dtype == \"float\":\n x = pd.cut(df[x], bins=10)\n\n metrics = {\"mean\":\"mean\", \"std err\":\"sem\", \"count\":\"count\"}\n df_grouped = df.groupby([x])[y_obs, y_est].agg(metrics)\n \n x_vals = range(len(df_grouped))\n y_vals = df_grouped[\"mean\"][y_est]\n ax1.errorbar(x_vals, y_vals,yerr=df_grouped[\"std err\"][y_est], fmt='-',\n marker='o',color=\"R\", mec='black', ms=10, mew=2, linewidth=4, \n capsize=10, elinewidth=2)\n\n y_vals = df_grouped[\"mean\"][y_obs]\n ax1.plot(x_vals, y_vals, '-', label=y_obs, marker='o',\n color = \"G\",mec='black', ms=10, mew=2, linewidth=4)\n\n y_vals = df_grouped[\"count\"][y_obs]\n ax2.bar(x_vals,y_vals, color='DarkSlateGray', alpha = 0.25)\n\n ax1.set_xlim(x_vals[0]-0.2,x_vals[-1]+1)\n x_levels = list(y_vals.index)\n plt.xticks(x_vals, x_levels)\n ax1.set_xticklabels(x_levels, rotation=45)\n ax1.grid(False)\n ax2.grid(False)\n font_size = 20\n ax1.set_xlabel(x_name, fontsize=font_size)\n ax1.set_ylabel(y_obs, fontsize=font_size)\n ax2.set_ylabel(\"count\", fontsize=font_size)\n plt.title(\"Average {y} for groups of {x}\".format(x=x_name, y=y_obs), \n fontsize=font_size+5)\n ax1.legend([y_obs, y_est], fontsize=font_size-2)\n if save:\n fig.savefig(\"/home/edward/work/repos/prometheus/python/plots/avg_response/{}.png\".\n format(x_name), bbox_inches='tight')\n if show:\n plt.show()", "def mse(y, y_pred, verbose=True):\n\n mse_sum = 0\n\n for i in range(len(y)):\n mse_sum += mean_squared_error(y[i], y_pred[i])\n\n if verbose:\n print(f\"Mean MSE {mse_sum / len(y)}\")\n\n return mse_sum / len(y)", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def simple_time_series(full_df, test_period, display_graphs=True):\n df = full_df.copy()\n df = df.filter([\"Canteen\"])\n\n train = df.iloc[:-test_period]\n test = df.iloc[-test_period:]\n\n resulting_prediction, predictions = prediction(train, test)\n\n if display_graphs is True:\n plt.figure(figsize=(14, 7))\n plt.plot(train)\n plt.plot(resulting_prediction)\n plt.legend([\"Real values\", \"Prediction\"], loc=\"best\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Number of 
people\")\n\n print(\n \"The mean absolute error (MAE) for the Simple Time Series model is {0:.0f} people\".format(\n find_MAE(test, predictions)\n )\n )", "def plot_true_predicted(train_test_sets, radii_test_RF,\n radii_test_output_error):\n\n X_train, X_test, y_train, y_test = train_test_sets\n plt.figure()\n plt.errorbar(radii_test_RF, y_test.values,\n xerr=radii_test_output_error,\n fmt='.', c='C1', elinewidth=0.5,\n label='Random forest')\n # 1:1 line and labels\n plt.plot(np.sort(y_test.values), np.sort(y_test.values), 'k-', lw=0.25)\n\n plt.ylabel(r'True radius ($R_\\oplus$)')\n plt.ylabel(r'Predicted radius ($R_\\oplus$)')\n plt.legend(loc='lower right')\n return None", "def load_univariate_series(self, test_col: str, grd_truth_col: str = None, plot_graph=False,\n ):\n random.seed(4)\n raw_data = pd.read_csv(filepath_or_buffer=self.file_path)\n assert test_col in list(raw_data.columns), test_col+\" is not in the columns of the data!\"\n if grd_truth_col is not None:\n assert grd_truth_col in list(raw_data.columns), grd_truth_col+\" is not in the columns of the data!\"\n drop_n = int(1. / self.train_ratio)\n raw_data = raw_data.iloc[::drop_n]\n raw_data.reset_index(inplace=True)\n test_data = raw_data[test_col]\n grd_truth_data = raw_data[grd_truth_col]\n if plot_graph:\n plt.plot(test_data, \".\", label='Test Data')\n plt.plot(grd_truth_data, color='r', label='Ground Truth')\n plt.xlabel('Time (s)')\n plt.ylabel(test_col)\n plt.show()\n # The non-NaN (or part of it) entries of the data will be used as training data; the NaN (missin data) will be\n # inferred from subsequent experiment.\n\n # First step, we simply use a univariate time series, regressing the tide height against time\n Y_grd = grd_truth_data.values\n X_grd = np.array(list(test_data.index))\n Y_train = test_data.dropna()\n X_train = np.array(list(Y_train.index))\n Y_train = Y_train.values\n data_null = test_data.isnull()\n X_test = np.array(test_data[data_null].index)\n\n if grd_truth_col is None:\n Y_test = None\n else:\n Y_test = grd_truth_data.iloc[X_test].values\n\n if self.n_test is not None:\n test_pt = np.minimum(self.n_test, len(Y_test))\n test_idx = np.array(random.sample(range(len(Y_test)), test_pt))\n Y_test = Y_test[test_idx]\n X_test = X_test[test_idx]\n\n # The index of data with missing entries. 
This will be used for prediction\n assert Y_test.shape[0] == X_test.shape[0], \"buggy code.\"\n return X_train.reshape(-1, 1), Y_train.reshape(-1, 1), X_test.reshape(-1, 1), \\\n Y_test.reshape(-1, 1), X_grd.reshape(-1, 1), Y_grd.reshape(-1, 1)", "def explained_variance_score(self):\n print('Explained variance score: ' + str(explained_variance_score(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def plot_actual_predicted(self):\n predicted = [self.f(x, self.coefficients) for x in self.x_values]\n\n plt.scatter(self.x_values, self.y_values, label = \"Actual data\", c = 'b')\n plt.plot(self.x_values, predicted, label = \"Predicted data\", c = 'r')\n plt.title(f\"Graph of Prediected and Actual data points.\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.legend()\n plt.show()", "def max_error(y_true, y_pred):\n ...", "def _plot_train_test_experiment(mtrain, mval, metric_name, isState):\n # axes\n f, axes = plt.subplots(2,2,figsize=(12,10))\n ltrain = _plot_experiment(mtrain, axes[:,0], metric_name, isTrain=True)\n lval = _plot_experiment(mval, axes[:,1], metric_name, isTrain=False)\n # title\n target = \"State\" if isState else \"Output\"\n f.suptitle(f\"{target} Errors\")\n f.tight_layout()\n return f, axes", "def _plot_model_pred_vs_obs(self, ax):\n\n res = self._model.fit()\n\n ax.plot(self._model.endog, res.fittedvalues, '.', label='Observation')\n\n x_lim = ax.get_xlim()\n\n ax.plot(x_lim, x_lim, 'k:', label='1:1 line')\n\n x_label = 'Observed ' + self._model.endog_names\n y_label = 'Predicted ' + self._model.endog_names\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n ax.legend(loc='best', numpoints=1)", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)" ]
[ "0.6890531", "0.65764153", "0.6521914", "0.6249297", "0.61620384", "0.61183745", "0.60831374", "0.60805416", "0.60410786", "0.6039407", "0.60239905", "0.6023398", "0.6006061", "0.5991418", "0.5943231", "0.593769", "0.59314346", "0.59314346", "0.5888226", "0.58814496", "0.58776605", "0.58554655", "0.5855196", "0.58442944", "0.58394176", "0.58328986", "0.5809818", "0.57790524", "0.5775766", "0.5769486", "0.5756289", "0.5747484", "0.5746373", "0.5742391", "0.5728906", "0.5718431", "0.5687718", "0.568508", "0.56796116", "0.5679549", "0.56739116", "0.56737995", "0.5673532", "0.5673224", "0.567042", "0.5668825", "0.5651968", "0.56504333", "0.564612", "0.56371", "0.5624819", "0.5618863", "0.55993533", "0.559525", "0.55884683", "0.5575268", "0.5563058", "0.5556489", "0.5541868", "0.55412275", "0.5529827", "0.5526136", "0.5518525", "0.55109596", "0.550677", "0.55064756", "0.55054426", "0.5501095", "0.54988164", "0.5491605", "0.5489276", "0.5489071", "0.5483174", "0.54749024", "0.5470382", "0.5468404", "0.54649776", "0.5464968", "0.54648167", "0.5463264", "0.54588807", "0.5458716", "0.5451396", "0.54433507", "0.5431681", "0.54312694", "0.54204", "0.5414175", "0.54136187", "0.54132843", "0.5413271", "0.54120255", "0.5408955", "0.540812", "0.54060394", "0.5403494", "0.54021144", "0.5400972", "0.53984416", "0.5394272" ]
0.6406863
3
Makes predictions for time series data using the trained model.
def predictions(loader, model, win_len_per_ser, criterion, device, window_out = 1):
    model.eval()
    num_win_per_ser = win_len_per_ser  # num windows
    #print(num_win_per_ser)
    y_pred = []
    y_true = []
    with torch.no_grad():
        for idx, (x, y) in enumerate(loader):
            #for i in range(0, len(y_test), num_win_per_ser): # i takes index values of first windows of different series
            win_start = torch.tensor(x[0]).float().to(device)  # saving the first window of each series
            #print('win_start:', win_start)
            CR = win_start[0][3]  # saving the CR value for particular series -> to be used for prediction
            #print('CR:', CR)
            win = win_start  # window variable which will be updated for new windows, takes first value as the starting window
            #print(win)
            win = win.reshape((1, win.shape[0], win.shape[1]))
            #print(win.shape)
            for j in range(num_win_per_ser):  # prediction loop
                y_hat = model(win)  # predicting values wrt win variable
                #print('y_hat ', y_hat)
                y_pred.append(y_hat[0].cpu().detach().numpy())  # add the value to y_pred
                #print('y_pred:', y_pred)
                y_true.append(y[j].cpu().detach().numpy())  # add the value to y_true
                #print('y_true:', y_true)
                cr_dummy = torch.empty((1, window_out, 1), dtype=torch.float32).to(device)
                y_hat = torch.cat((y_hat, cr_dummy.fill_(float(CR))), 2).float()
                #y_hat = tf.concat([y_hat, tf.fill(dims = (1, window_out, 1), value = CR)], axis = 2)  # adding CR value to y_hat for further predictions
                #print('cr added to y_hat', y_hat)
                win = torch.cat((win, y_hat), 1)
                #win = tf.concat([win, y_hat], axis = 1)  # adding our prediction to win
                #print('win', win)
                win = win[:, window_out:, :]  # updating win by removing the starting elements
                #print('new_win for next iter', win)
    y_pred = torch.tensor(y_pred).to(device)
    y_true = torch.tensor(y_true).to(device)
    assert (y_pred.shape == y_true.shape)
    mae = criterion(y_pred, y_true)
    #mae = tf.reduce_sum(tf.keras.metrics.mean_absolute_error(y_pred, y_test))
    print(f'The error is: {mae:.5f}')
    model.train()
    return y_pred.cpu().detach().numpy(), y_true.cpu().detach().numpy(), mae.cpu().detach().numpy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_predict(train_file_arg):\n\n # Load command line arguments\n train_file = train_file_arg\n parameter_file = \"prophet/lstm/trainingConfig.json\"\n\n # Load training parameters\n params = json.loads(open(parameter_file).read())\n\n # Load time series dataset, and split it into train and test\n x_train, y_train, x_test, _, x_test_raw, _,\\\n last_window_raw, last_window, last_datetime_epoch = dataHelper.load_timeseries(train_file, params)\n\n # Build RNN (LSTM) model\n lstm_layer = [1, params[\"window_size\"], params[\"hidden_unit\"], 1]\n model = buildModel.rnn_lstm(lstm_layer, params)\n\n # Train RNN (LSTM) model with train set\n model.fit(\n x_train,\n y_train,\n batch_size=params[\"batch_size\"],\n epochs=params[\"epochs\"],\n validation_split=params[\"validation_split\"])\n\n # Check the model against test set\n predicted = buildModel.predict_next_timestamp(model, x_test)\n predicted_raw = []\n for i in range(len(x_test_raw)):\n predicted_raw.append((predicted[i] + 1) * x_test_raw[i][0])\n\n # Predict next time stamp\n next_timestamp = buildModel.predict_next_timestamp(model, last_window)\n next_timestamp_raw = (next_timestamp[0] + 1) * last_window_raw[0][0]\n print(\"The next time stamp forecasting is: {}\".format(next_timestamp_raw))\n\n # Add 5 minutes for a new timestamp of predictions\n last_datetime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(last_datetime_epoch))\n last_datetime = datetime.strptime(last_datetime, \"%Y-%m-%d %H:%M:%S\")\n new_datetime = last_datetime + timedelta(seconds=300)\n new_datetime_epoch = time.mktime(new_datetime.timetuple())\n new_datetime = new_datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # Concatenate datetime and price forecast\n new_entry = \"coin_name,\" + str(new_datetime_epoch) + \",\" \\\n + str(next_timestamp_raw) + \",coin_supply\" + \",coin_mc\" + \"\\n\"\n\n # Write to CSV file of new prediction\n fd = open(train_file, \"a\")\n fd.write(new_entry)\n fd.close()\n\n # Return new prediction\n return [new_datetime, str(next_timestamp_raw)]", "def train(self, training_data):\n # load and preprocess\n super(Forecast, self).train(training_data)\n # remove NaNs\n self.historical_data = self.historical_data.loc[~self.historical_data.isnull().any(axis=1)]\n # project timestamps into vector space\n if self.timestamp_column in self.historical_data.columns:\n self.use_timestamp = True\n ts = self.historical_data.set_index(self.timestamp_column).index\n self.epoch = min(ts)\n self.epoch_span = float((max(ts) - self.epoch).total_seconds())\n time_features = make_time_features(ts,\n index=self.historical_data.index,\n epoch=self.epoch,\n epoch_span=self.epoch_span)\n self.historical_data = pd.concat([self.historical_data, time_features], axis=1)\n self.historical_data.drop(self.timestamp_column, axis=1, inplace=True)\n # leave all other variables independent\n self.independent_variables = [name for name in self.historical_data.columns\n if name not in self.dependent_variables]\n\n self.model.fit(self.historical_data[self.independent_variables],\n self.historical_data[self.dependent_variables])\n\n # release historical data to save on memory\n # note that python garbage collection is not instantaneous\n LOG.warn(\"Releasing building load forecast training data. 
The agent will not be able to retrain on this data\")\n self.historical_data = None", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n\n self.model = lstm_model(self.length, self.n_days, self.features, self.style)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)\n\n self.history = self.model.fit(np.array(X_train), np.array(y_train),\n #validation_split = 0.3,\n #callbacks = [es],\n epochs = 200,\n batch_size = 64,\n shuffle = True,\n verbose = True)", "def train(self, X_t_, W_previous_, pf_value_previous_, dailyReturn_t_):\n self.sess.run(self.train_op, feed_dict={self.X_t: X_t_,\n self.W_previous: W_previous_,\n self.pf_value_previous: pf_value_previous_,\n self.dailyReturn_t: dailyReturn_t_})", "def test_predict_prep():\n args = get_layer('predict', 'manual', 'temporal', False, True, window=2, step_size=3)\n run_layer(*args)", "def makePredictions(self, data, batchSize=1):\n ds = tf.data.Dataset.from_tensor_slices(data)\n ds = ds.window(self.lookBack, shift=self.forecast, drop_remainder=True)\n ds = ds.flat_map(lambda w: w.batch(self.lookBack))\n ds = ds.batch(batchSize).prefetch(1)\n prediction = self.model.predict(ds)\n return prediction", "async def _build_model(\n self,\n data: Timeseries\n ) -> Prophet:\n model = Prophet()\n model.fit(data.get_dataframe())\n return model", "def create_forecast_dataset(self):\n pass", "def predict(self, next_days):\n last_date = self.series.index[-1]\n for time in range(next_days):\n row = {}\n for c in self.countries:\n history = self.series[c]\n series = history.astype(float)\n model = ARIMA(series, order=(2,1,0))\n model_fit = model.fit()\n output = model_fit.forecast()\n row[c] = output[0][0]\n last_date = last_date + pd.DateOffset(1)\n self.series.loc[last_date] = row", "def train_model(model, train_data, train_targets, epochs):\n history = model.fit(train_data, train_targets, epochs=epochs, \n batch_size=40, validation_split=0.15,verbose=False)\n \n return history", "def prophet_train(data): \n \n model = Prophet(interval_width=.95, changepoint_prior_scale=6, yearly_seasonality=True, \n seasonality_prior_scale=1, weekly_seasonality=False, daily_seasonality=False)\n model.add_seasonality(name='monthly', period=120, fourier_order=4)\n \n model.fit(data)\n \n future_dates = model.make_future_dataframe(periods=12, freq='MS')\n forecast = model.predict(future_dates)\n forecast = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]\n results = forecast[forecast['ds']>='2018-01-01']\n return(results)", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def forecast(self) -> TSDataset:\n future = self.ts.make_future(self.horizon)\n predictions = self.model.forecast(future)\n return predictions", "def model():\n return TimeSeriesMultiReg()", "def forecast(self, stock):\n # Load the trained model\n model_handler = ModelHandler()\n model = model_handler.load_json_model(stock)\n\n # Importing the training set\n dataset = pd.read_csv(stock.csv_name)\n dates = dataset.iloc[len(dataset)-31:len(dataset)-1, 0].values\n dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]\n\n # Create the test dataset\n dataset_test = dataset[len(dataset) - 30:]\n real_stock_price = dataset_test.iloc[:, 1:2].values\n dataset = dataset['Open']\n inputs = 
dataset[len(dataset) - len(dataset_test) - 60:].values\n inputs = inputs.reshape(-1, 1)\n\n # Feature Scaling\n sc = MinMaxScaler(feature_range=(0, 1))\n inputs = sc.fit_transform(inputs)\n\n x_test = []\n x_test.append(inputs[0:60, 0])\n predicted_values = []\n for i in range(1, 31):\n x_test_np = np.array(x_test)\n x_test_np = np.reshape(x_test_np, (x_test_np.shape[0], x_test_np.shape[1], 1))\n new_data = model.predict(x_test_np)\n predicted_values.append(new_data[0])\n x_test[0] = np.delete(x_test[0], 0)\n x_test[0] = np.concatenate([x_test[0], new_data[0]])\n\n predicted_values = sc.inverse_transform(predicted_values)\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n plt.plot(dates, real_stock_price, color='red', label=f'Actual {stock.ticker} Stock Price')\n plt.plot(dates, predicted_values, color='blue', label=f'Predicted {stock.ticker} Stock Price')\n plt.gcf().autofmt_xdate()\n plt.title(f'{stock.ticker} Stock Price Prediction')\n plt.xlabel('Time')\n plt.ylabel(f'{stock.ticker} Stock Price')\n plt.legend()\n plt.show()", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def sequence_predict(self, load_script=False, variant=\"predict\"):\n\n if variant != 'internal':\n # Open an existing model and get the input dataset. \n # Target for historical data are expected if using previous targets as a feature.\n request_data = self._get_model_and_data(ordered_data=True) \n if type(request_data) == list:\n X, y = request_data\n else:\n X = request_data\n else:\n X = self.X_test.copy()\n y = self.y_test.copy()\n\n # Scale the targets and increase stationarity if required\n if variant != 'internal' and self.model.lag_target and (self.model.scale_target or self.model.make_stationary):\n # If using differencing, we retain original y values for inversing the transformation later\n y_orig = y.values.ravel() if self.model.make_stationary=='difference' else None\n # Apply the transformer to the targets\n y = self.model.target_transformer.transform(y)\n # Drop samples where y cannot be transformed due to insufficient lags\n X = X.iloc[len(X)-len(y):]\n\n # Set the number of periods to be predicted\n prediction_periods = self.model.prediction_periods\n # Set the number of rows required for one prediction\n self.rows_per_pred = 1\n self.diff_lags = max(self.model.stationarity_lags) if self.model.lag_target and self.model.make_stationary=='difference' else 0\n # Set property depending on whether the current sample will be included as an input, or if we only use lag observations for predictions\n self.first_pred_modifier = 1 if self.model.current_sample_as_input else 0 \n\n # Check that the input data includes history to meet any lag calculation requirements\n if self.model.lags:\n # An additional lag observation is needed if previous targets are being added to the features\n self.rows_per_pred = self.model.lags+self.first_pred_modifier+1 if self.model.lag_target else self.model.lags+self.first_pred_modifier\n # If the target is being lagged and made stationary through differencing additional lag periods are required\n if self.model.lag_target and self.model.make_stationary=='difference':\n extra_msg = \" plus an additional {} periods for making the target stationary using differencing\".format(self.diff_lags)\n # For multi-step 
predictions we only expect lag values, not the current period's values\n # self.rows_per_pred = self.rows_per_pred-1 if prediction_periods > 1 else self.rows_per_pred\n assert len(X) >= self.rows_per_pred + self.diff_lags, \"Insufficient input data as the model requires {} lag periods for each prediction\".format(self.rows_per_pred) + extra_msg\n\n if variant != 'internal':\n # Prepare the response DataFrame\n # Initially set up with the 'model_name' and 'key' columns and the same index as request_df\n self.response = self.request_df.drop(columns=['n_features'])\n \n # Set up a list to contain predictions and probabilities if required\n predictions = []\n get_proba = False\n if variant == 'predict_proba':\n get_proba = True\n probabilities = [] \n\n # Refresh the keras model to avoid tensorflow errors\n if self.model.using_keras:\n self._keras_refresh()\n\n if prediction_periods > 1:\n if not self.model.lag_target:\n y = None\n\n # Check that we can generate 1 or more predictions of prediction_periods each\n n_samples = len(X)\n assert (n_samples - self.rows_per_pred) >= prediction_periods, \\\n \"Cannot generate predictions for {} periods with {} rows, with {} rows required for lag observations. You may need to provide more historical data or sufficient placeholder rows for future periods.\"\\\n .format(prediction_periods, n_samples, self.rows_per_pred)\n \n # For multi-step predictions we can add lag observations up front as we only use actual values\n # i.e. We don't use predicted y values for further predictions \n if self.model.lags or self.model.lag_target:\n X = self._add_lags(X, y=y, extrapolate=self.first_pred_modifier) \n\n # We start generating predictions from the first row as lags will already have been added to each sample\n start = 0\n else:\n # We start generating predictions from the point where we will have sufficient lag observations\n start = self.rows_per_pred\n \n if self.model.lag_target or prediction_periods > 1:\n # Get the predictions by walking forward over the data\n for i in range(start, len(X) + self.first_pred_modifier, prediction_periods): \n # For multi-step predictions we take in self.rows_per_pred rows of X to generate predictions for prediction_periods\n if prediction_periods > 1:\n batch_X = X.iloc[[i]]\n \n if not get_proba:\n # Get the prediction. \n pred = self.model.pipe.predict(batch_X)\n # Flatten the predictions for multi-step outputs and add to the list\n pred = pred.ravel().tolist()\n predictions += pred\n else:\n # Get the predicted probability for each sample \n proba = self.model.pipe.predict_proba(batch_X)\n proba = proba.reshape(-1, len(self.model.pipe.named_steps['estimator'].classes_))\n probabilities += proba.tolist()\n # For walk forward predictions with lag targets we use each prediction as input to the next prediction, with X values avaialble for future periods.\n else:\n batch_X = X.iloc[i-self.rows_per_pred : i] \n # Add lag observations\n batch_y = y.iloc[i-self.rows_per_pred : i]\n batch_X = self._add_lags(batch_X, y=batch_y, extrapolate=self.first_pred_modifier)\n\n # Get the prediction. We only get a prediction for the last sample in the batch, the remaining samples only being used to add lags.\n pred = self.model.pipe.predict(batch_X.iloc[[-1],:])\n\n # Add the prediction to the list. 
\n predictions.append(pred)\n \n # Add the prediction to y to be used as a lag target for the next prediction\n y.iloc[i - self.first_pred_modifier, 0] = pred\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities.append(self.model.pipe.predict_proba(batch_X.iloc[[-1],:]))\n else:\n # Add lag observations to the samples if required\n if self.model.lags:\n X = self._add_lags(X, extrapolate=self.first_pred_modifier)\n\n # Get prediction for X\n predictions = self.model.pipe.predict(X)\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities = self.model.pipe.predict_proba(X)\n \n # Set the number of placeholders needed in the response\n # These are samples for which predictions were not generated due to insufficient lag periods or for meeting multi-step prediction period requirements\n self.placeholders = self.rows_per_pred + self.diff_lags - self.first_pred_modifier\n\n # Transform probabilities to a readable string\n if get_proba:\n # Add the required number of placeholders at the start of the response list\n y = [\"\\x00\"] * self.placeholders\n \n # Truncate multi-step predictions if the (number of samples - self.rows_per_pred) is not a multiple of prediction_periods\n if prediction_periods > 1 and ((n_samples-self.rows_per_pred) % prediction_periods) > 0: \n probabilities = probabilities[:-len(probabilities)+(n_samples-self.rows_per_pred)]\n \n for a in probabilities:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i += 1\n y.append(s[2:])\n\n # Prepare predictions\n else:\n if prediction_periods > 1:\n # Set the value to use for nulls\n null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n\n # Truncate multi-step predictions if the (number of samples - self.placeholders) is not a multiple of prediction_periods\n if (n_samples-self.rows_per_pred) % prediction_periods > 0:\n predictions = predictions[:-len(predictions)+(n_samples-self.rows_per_pred)]\n\n # Add null values at the start of the response list to match the cardinality of the input from Qlik\n y = np.array(([null] * (self.rows_per_pred - self.first_pred_modifier)) + predictions)\n elif self.model.lag_target: \n # Remove actual values for which we did not generate predictions due to insufficient lags\n if is_numeric_dtype(y.iloc[:, 0].dtype):\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = np.NaN\n else:\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = \"\\x00\"\n # Flatten y to the expected 1D shape\n y = y.values.ravel()\n else:\n y = np.array(predictions)\n \n # Inverse transformations on the targets if required \n if variant != 'internal' and (self.model.scale_target or self.model.make_stationary):\n # Take out placeholder values before inverse transform of targets\n null_values = y[:self.rows_per_pred - self.first_pred_modifier] if prediction_periods > 1 or self.model.lag_target else []\n # Add placeholders for samples removed during differencing\n if self.model.make_stationary=='difference':\n null_values = np.append(null_values, np.repeat(null_values[0], self.diff_lags))\n y = y if len(null_values) == 0 else y[-len(predictions):]\n # Add untransformed lag values for differencing if required\n end = self.placeholders\n start = end - self.diff_lags\n y = y if y_orig is None else np.append(y_orig[start : end], y)\n\n # Apply the transformer to the test targets\n y 
= self.model.target_transformer.inverse_transform(y) \n\n # Remove lags used for making the series stationary in case of differencing\n if self.model.make_stationary == 'difference':\n y = y[self.diff_lags:]\n\n # Replace lags used for making the series stationary with nulls in case of differencing\n # if self.model.make_stationary == 'difference':\n #null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n # y = np.append(np.array([null]*self.diff_lags), y[self.diff_lags:])\n \n # Add back the placeholders for lag values\n if len(null_values) > 0:\n y = np.append(null_values, y)\n \n if variant == 'internal':\n return y\n\n # Add predictions / probabilities to the response\n self.response['result'] = y\n\n # Reindex the response to reset to the original sort order\n self.response = self.response.reindex(self.original_index)\n \n if load_script:\n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def train_and_predict(gas_station_id=DEFAULT_GAS_STATION_ID, start_time=None, end_time=None,\n up_to_days=DEFAULT_UP_TO_DAYS, plot=False, use_cached=False, cache=False):\n model_loaded = False\n if use_cached:\n model_path = MODEL_PATH.format(gas_station_id)\n try:\n if not os.path.isfile(model_path):\n raise ValueError(\"No model was found at {}\".format(model_path))\n\n model = pickle.load(open(model_path, \"rb\"))\n df_future = None\n model_loaded = True\n except Exception as e:\n print(e)\n\n if not model_loaded:\n model, df_future = train(gas_station_id=gas_station_id, up_to_days=up_to_days, cache=cache)\n df_forecast = predict(model, start_time=start_time, end_time=end_time, up_to_days=up_to_days, plot=plot)\n return model, df_future, df_forecast", "def predict(self, epochs): # noqa\n self._prep_times()\n super(TimeDecoding, self).predict(epochs)\n self._clean_times()\n return self.y_pred_", "def linear_regression_forecasting(x_train,y_train,x_valid,y_valid,x_test,y_test):\n y_train = y_train.reshape(TRAINING_BATCH_SIZE,N_PREDICTIONS*N_OUTPUT_FEATURES)\n y_valid = y_valid.reshape(VALIDATION_BATCH_SIZE,N_PREDICTIONS*N_OUTPUT_FEATURES)\n layer1 = keras.layers.Flatten(input_shape=[N_INPUT_STEPS,N_INPUT_FEATURES]) #input layer flattens each batch instance from [n_steps,n_input_features] to [n_steps*n_input_features]\n layer2 = keras.layers.Dense(N_PREDICTIONS*N_OUTPUT_FEATURES) #fully connected layer solves combination of linear equations\n model = keras.models.Sequential([layer1,layer2])\n model.compile(loss=\"mse\",optimizer=\"adam\")\n training_history = model.fit(x_train,y_train,epochs=N_EPOCHS,validation_data=(x_valid,y_valid),verbose=0)\n y_pred = model.predict(x_test, TESTING_BATCH_SIZE)\n y_pred = y_pred.reshape(TESTING_BATCH_SIZE,N_PREDICTIONS,N_OUTPUT_FEATURES)\n return training_history.history, y_pred, model", "async def _forecast_single(\n self,\n model: Prophet\n ) -> pd.DataFrame:\n future = model.make_future_dataframe(self._periods, 'H', False)\n return model.predict(future)", "def _build_forecast_series(self,\n points_preds: np.ndarray) -> TimeSeries:\n\n time_index = 
self._generate_new_dates(len(points_preds))\n\n return TimeSeries.from_times_and_values(time_index, points_preds, freq=self.training_series.freq())", "def trainModel(df):\n # Parameters of the Network\n NEURONS = 4\n BATCH_SIZE = 1\n NB_EPOCH = 5\n # DEV_SIZE = 64\n DT_SIZE = 720\n\n if len(df) < DT_SIZE:\n DT_SIZE = len(df) - 8\n\n data_cols = []\n for i in range(1, 8):\n data_cols.append('last{}day'.format(8-i))\n data_cols.append('target')\n\n data = []\n for i in range(DT_SIZE):\n index = len(df) - 1\n x = getFeatures(df, index-i)\n z = standardScaler(x)\n data.append(z[0])\n\n data = np.array(data)\n dataModel = pd.DataFrame(data=data, columns=data_cols)\n\n X, y = dataModel[data_cols[:-1]].values, dataModel[data_cols[-1:]].values\n X = X.reshape(X.shape[0], 1, X.shape[1])\n # X_train, X_dev, y_train, y_dev = X[:len(X) - DEV_SIZE], X[-DEV_SIZE:], y[:len(X) - DEV_SIZE], y[-DEV_SIZE:]\n\n model = Sequential()\n model.add(LSTM(NEURONS, batch_input_shape=(BATCH_SIZE, X.shape[1], X.shape[2]), stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse'])\n\n for i in range(NB_EPOCH):\n model.fit(X, y, epochs=1, batch_size=BATCH_SIZE, verbose=0, shuffle=False)\n model.reset_states()\n print(\"Epoch {} completed!\".format(i+1))\n \n return model", "def train_model(model, data_train, y_train, data_test, y_test, ARGS):\n callback_list = create_callbacks(model, (data_test, y_test), ARGS)\n train_generator = SequenceBuilder(data_train, ARGS, target=y_train, target_out=True)\n test_generator = SequenceBuilder(data_test, ARGS, target=y_test, target_out=True)\n history = model.fit_generator(generator=train_generator,\n epochs=ARGS.epochs, verbose=2,\n validation_data=test_generator,\n # validation_freq=[1, 5, 10],\n callbacks=callback_list\n # ,max_queue_size=15, use_multiprocessing=False,\n # workers=3, initial_epoch=0\n )\n return history", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def train(self, train_set=dt.treat_data(dt.load_data(\"data/train.csv\"))):\n if self.model is None:\n return\n my_callback = keras.callbacks.callbacks.EarlyStopping(monitor='loss', min_delta=0.0, patience=1000, verbose=2,\n mode='auto', baseline=None, restore_best_weights=False)\n\n train_data, train_labels = train_set\n history = self.model.fit(x=train_data, y=train_labels, epochs=100000, batch_size=45,\n callbacks=[my_callback], verbose=2, shuffle=True)\n self.model.save(\"titanic_\" + str(time.time()) + \".h5\")\n return history", "def fit_model(self):\n model = self.make_model()\n self.history = model.fit(x=self.xt_train, y=self.yt_train,\n epochs=self.n_epochs, verbose=0,\n validation_split=self.v_split, shuffle=True)\n self.eval_model(model)\n self.save_model(model)\n return model", "def predict(self, trained_model, prediction_datetime):\n return trained_model.predict()", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def create_sts_model(train_x, train_y):\n 
model = GaussianNB()\n model.fit(train_x, train_y)\n save_model(model, \"simple_time_series\")\n return model", "def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n\n # save copy of train data so we don't predict for each row in training\n self._output_columns = outputs.columns\n self._train_data = inputs.copy()\n\n # combine inputs and outputs for internal TimeSeries object\n self._ts_frame = inputs.append_columns(outputs)\n\n # Parse cols needed for ts object\n # TODO should only find cols to drop once!\n self._get_cols(self._ts_frame.metadata)\n\n # Mark time difference (between min and min + 1 timestamp)\n if self._grouping_column is None:\n self._max_train = max(self._ts_frame.iloc[:, self._timestamp_column])\n self._train_diff = int(\n np.diff(np.sort(self._ts_frame.iloc[:, self._timestamp_column]))[0]\n )\n else:\n g_col, t_col = (\n self._ts_frame.columns[self._grouping_column],\n self._ts_frame.columns[self._timestamp_column],\n )\n self._max_train = self._ts_frame.groupby(g_col)[t_col].agg(\"max\")\n # making simplifying assumption that difference is the same across all groups\n self._train_diff = int(\n self._ts_frame.groupby(g_col)[t_col]\n .apply(lambda x: np.diff(np.sort(x))[0])\n .iloc[0]\n )\n\n # assumption is that integer timestamps are days (treated this way by DeepAR objects)\n timestamp_semantic_types = self._ts_frame.metadata.query_column_field(\n self._timestamp_column, \"semantic_types\"\n )\n if \"http://schema.org/Integer\" in timestamp_semantic_types:\n self._integer_timestamps = True\n else:\n self._integer_timestamps = False\n\n # drop cols if multiple grouping columns\n if len(self._drop_cols) > 0:\n self._ts_frame = self._ts_frame.remove_columns(self._drop_cols)\n self._update_indices()\n\n # Create TimeSeries dataset object and learner\n self._create_data_object_and_learner(self.hyperparams[\"val_split\"])\n\n # mark that new training data has been set\n self._new_train_data = True", "def train(model, timeseries, indices, words, args):\n top = len(timeseries)-args.window_size-args.batch_size-int(len(timeseries)*0.05)\n print(\"--- Model Version: {} ---\".format(name))\n print(\"Number of words in training set: {}\".format(top))\n model.fit_generator(generator(timeseries, indices, words, args, 1, top), steps_per_epoch=args.epoch_steps, epochs=args.nb_epoch, validation_data=generator(timeseries, indices, words, args, top, len(timeseries)-args.window_size-args.batch_size), validation_steps=args.val_steps, callbacks=[lr_reducer, checkpointer], shuffle=True)", "def train_model(self, data:List[np.ndarray]):\n d = np.vstack(data)\n np.random.shuffle(d)\n self.regressor.fit(\n X=self.input(d),\n y=self.output(d)\n )", "def predict(model, up_to_days=DEFAULT_UP_TO_DAYS, start_time=None, end_time=None, plot=False):\n if start_time is None and end_time is None:\n df_future = model.make_future_dataframe(periods=24 * up_to_days, freq='H')\n start_future = df_future.iloc[-1, :]['ds'] - datetime.timedelta(days=up_to_days)\n df_future = df_future[df_future['ds'] >= start_future]\n elif start_time is not None and end_time is not None:\n indices = pd.date_range(start_time, end_time, freq='H')\n assert len(indices) > 0, \"Indices should not be empty\"\n df_future = pd.DataFrame(columns=['ds', 'y'])\n df_future['ds'] = indices\n else:\n raise ValueError(\"Either up_to_days or start_time and end_time must be set appropriately.\")\n df_forecast = model.predict(df_future)\n if plot:\n model.plot(df_forecast)\n model.plot_components(df_forecast)\n return 
df_forecast", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def train_model(self, model, data) -> keras.Model:\n self.history = model.fit(\n self.generator.flow(data.x.train, data.y.train),\n epochs=self.N_epochs,\n validation_data=(data.x.valid, data.y.valid),\n verbose=1,\n steps_per_epoch=int(np.floor(data.x.train.shape[0] / self.batch_size)),\n callbacks=self.callbacks,\n shuffle=True,\n )\n\n return model", "def ts_fit(series: TimeSeries) -> TimeSeries:\n pass", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n df = self.df\n self.scaler = MinMaxScaler()\n self.scaler.fit(df)\n df[df.columns] = self.scaler.transform(df)\n\n\n X_train, y_train = get_X_y(df, self.n_days, self.length , self.style)\n X_train = np.array(X_train)\n X_train.shape = (X_train.shape[0], 
X_train.shape[2])\n\n self.clf = LogisticRegression().fit(X_train, y_train)\n\n #es = EarlyStopping(monitor = 'accuracy',mode = 'min' , verbose = 1, patience = 100, restore_best_weights = True)", "def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()", "def fit_predict(self, X: TimeSeriesInstances, y=None) -> np.ndarray:\n self.fit(X)\n return self.predict(X)", "def forecast(\n input_values: np.ndarray, future_dates: List, model: Sequential, scaler\n) -> pd.DataFrame:\n if scaler:\n future_values = scaler.inverse_transform(\n model.predict(input_values.reshape(1, -1, 1)).reshape(-1, 1)\n )\n else:\n future_values = model.predict(input_values.reshape(1, -1, 1)).reshape(-1, 1)\n\n df_future = pd.DataFrame(\n future_values, index=future_dates, columns=[\"Predicted Price\"]\n )\n return df_future", "def predict(data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(data={\"prediction\": trained_model.predict(data)})", "def train_predict(model_list,X_train, X_test, y_train, y_test):\n P = np.zeros((y_test.shape[0], len(model_list)))\n P = pd.DataFrame(P)\n\n print(\"Fitting models.\")\n cols = list()\n for i, (name, m) in enumerate(models.items()):\n print(\"%s...\" % name, end=\" \", flush=False)\n m.fit(X_train, y_train)\n P.iloc[:, i] = m.predict_proba(X_test)[:, 1]\n cols.append(name)\n print(\"done\")\n\n P.columns = cols\n print(\"Done.\\n\")\n return P", "def before_epoch(self):\n\n # Prepare prediction container in every epoch, set/reset here as new predictions are obtained after each epoch as NN learns\n self.y_pred = []", "def test_predict_prep_proba():\n args = get_layer('predict', 'manual', 'temporal', True, True, window=2, step_size=3)\n run_layer(*args)", "def model_predict(country,year,month,day,all_models=None,test=False):\r\n\r\n ## start timer for runtime\r\n time_start = time.time()\r\n\r\n ## load model if needed\r\n if not all_models:\r\n all_data,all_models = model_load(training=False)\r\n \r\n ## input checks\r\n if country not in all_models.keys():\r\n #log\r\n raise Exception(\"ERROR (model_predict) - model for country '{}' could not be found\".format(country))\r\n\r\n for d in [year,month,day]:\r\n if re.search(\"\\D\",d):\r\n #log\r\n raise Exception(\"ERROR (model_predict) - invalid year, month or day\")\r\n \r\n ## load data\r\n model = all_models[country]\r\n data = all_data[country]\r\n\r\n ## check date\r\n target_date = \"{}-{}-{}\".format(year,str(month).zfill(2),str(day).zfill(2))\r\n print(target_date)\r\n\r\n if target_date not in data['dates']:\r\n raise Exception(\"ERROR (model_predict) - date {} not in range {}-{}\".format(target_date,\r\n data['dates'][0],\r\n data['dates'][-1]))\r\n date_indx = np.where(data['dates'] == target_date)[0][0]\r\n query = data['X'].iloc[[date_indx]]\r\n \r\n ## sainty check\r\n if data['dates'].shape[0] != data['X'].shape[0]:\r\n #log\r\n raise Exception(\"ERROR (model_predict) - dimensions mismatch\")\r\n\r\n ## make prediction and gather data for log entry\r\n y_pred = model.predict(query)\r\n y_proba = None\r\n if 'predict_proba' in dir(model) and 'probability' in dir(model):\r\n if model.probability == True:\r\n y_proba = model.predict_proba(query)\r\n\r\n\r\n m, s = divmod(time.time()-time_start, 60)\r\n h, m = divmod(m, 60)\r\n runtime = \"%03d:%02d:%02d\"%(h, m, s)\r\n\r\n ## update predict log\r\n update_predict_log('Prediction completed for country {0} and target date {1}. 
Total runtime {2}'.format(country,target_date,runtime))\r\n return({'y_pred':y_pred,'y_proba':y_proba})", "def prepare_train_data(self):\r\n ## Impute rlkpis\r\n print(\"Imputing rlKPI df\")\r\n self.rlkpi.add_target_labels(1)\r\n self.rlkpi.impute_rl_kpis()\r\n\r\n print(\"Add 'met-real-station_no' & met-forecast-station_no to rl_kpis_df\")\r\n self.add_met_real_forecast_station_col_to_rlkpis()\r\n print(\"Merge 'met-real-sampled df to rl kps \")\r\n self.merge_met_real_sampled_df_to_rlkpis()\r\n\r\n ## Imputations for met-forecast\r\n print(\"Impute met-forecast\")\r\n met_forecast_obj = self.metfcast\r\n met_forecast_obj.impute_met_forecast()\r\n\r\n #Merge met forecast data to earlier merged data\r\n print(\"Merge Train data with imputed forecast df\")\r\n self.train_data = pd.merge(self.train_data,\r\n met_forecast_obj.imputed_forecast_df,\r\n on=['datetime-station_no'], indicator=True, how='inner')\r\n print(\"Check any imputation needed\", self.train_data.isna().sum().sum())\r\n self.train_data.drop(['_merge'], axis=1, inplace=True)\r\n self.perform_data_under_sampling(self.train_data)", "def predict(self, **kwargs):\n\n logging.debug(\n \"Call predict() with parameters. \"\n \"Forecast 1 step only, kwargs:{kwargs}\".format(kwargs=kwargs)\n )\n\n X_test = self.df[-self.step :][self.feature_names]\n X_test = self.scaler.transform(X_test)\n y_predict = self.model.predict(X_test)\n poly_now = self.y_train_season_obj[-1]\n first_occ = np.where(self.y_train_season_obj == poly_now)\n polynext = self.y_train_season_obj[first_occ[0][0] + self.step]\n now = self.df[\"y\"][-self.step :]\n return (now - poly_now) - y_predict + polynext", "def predict(self, days=None, name=\"Main\"):\n # Arguments\n if name not in self._reghandler_dict:\n raise UnExecutedError(f\"Scenario.fit(name={name})\")\n # Prediction with regression model\n handler = self._reghandler_dict[name]\n df = handler.predict()\n # -> end_date/parameter values\n df.index = [date.strftime(self.DATE_FORMAT) for date in df.index]\n df.index.name = \"end_date\"\n # Days to predict\n days = days or [len(df) - 1]\n self._ensure_list(days, candidates=list(range(len(df))), name=\"days\")\n phase_df = df.reset_index().loc[days, :]\n # Set new future phases\n for phase_dict in phase_df.to_dict(orient=\"records\"):\n self.add(name=name, **phase_dict)\n return self", "def make_prediction(x_train, y_train, x_test, model):\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n return y_predict", "def rnn_iterative_forecasting(x_train,x_valid,x_test,series,indices):\n\n models = []\n for feature in range(N_INPUT_FEATURES):\n # model is only trained to predict the next time step (i.e. 
step n+1).\n y_train, y_valid = series[:indices[0],-N_PREDICTIONS,feature], series[indices[0]:indices[1],-N_PREDICTIONS,feature]\n m = get_sequence_to_vector_rnn_model(1)\n m.compile(loss=\"mse\",optimizer=\"adam\")\n m.fit(x_train,y_train,epochs=N_EPOCHS,validation_data=(x_valid,y_valid),verbose=0)\n models.append(m)\n full_sequence = np.zeros((TESTING_BATCH_SIZE,N_STEPS,N_INPUT_FEATURES))\n full_sequence[:,:N_INPUT_STEPS,:] = x_test[:,:,:] # fill in the input time steps\n for step in range(N_PREDICTIONS):\n for feature in range(N_INPUT_FEATURES):\n m = models[feature]\n offset = N_INPUT_STEPS + step\n #full_sequence[:,offset,N_OUTPUT_FEATURES:] = x_test[:,-1,N_OUTPUT_FEATURES:]\n data_out = m.predict(full_sequence[:,step:offset,:],TESTING_BATCH_SIZE)\n full_sequence[:,offset,feature] = data_out.reshape(TESTING_BATCH_SIZE) # output features span from index 0 to N_OUTPUT_STEPS\n y_pred = full_sequence[:,-N_PREDICTIONS:,:N_OUTPUT_FEATURES]\n return np.mean(keras.losses.mean_squared_error(y_test,y_pred)), y_pred", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def forecast(self, ti=None, tf=None, recalculate=False, use_model=None, n_jobs=6):\n self._use_model = use_model\n makedir(self.preddir)\n\n # \n self.ti_forecast = self.ti_model if ti is None else datetimeify(ti)\n self.tf_forecast = self.tf_model if tf is None else datetimeify(tf)\n if self.tf_forecast > self.data.tf:\n self.tf_forecast = self.data.tf\n if self.ti_forecast - self.dtw < self.data.ti:\n self.ti_forecast = self.data.ti+self.dtw\n\n loadFeatureMatrix = True\n\n model_path = self.modeldir + os.sep\n if use_model is not None:\n self._detect_model()\n model_path = self._use_model+os.sep\n \n model,classifier = get_classifier(self.classifier)\n\n # logic to determine which models need to be run and which to be \n # read from disk\n pref = type(model).__name__\n fls = glob('{:s}/{:s}_*.pkl'.format(model_path, pref))\n load_predictions = []\n run_predictions = []\n if recalculate:\n run_predictions = fls\n else:\n for fl in fls:\n num = fl.split(os.sep)[-1].split('_')[-1].split('.')[0]\n flp = '{:s}/{:s}_{:s}.csv'.format(self.preddir, pref, num)\n if not os.path.isfile(flp):\n run_predictions.append(flp)\n else:\n load_predictions.append(flp)\n\n ys = [] \n # load existing predictions\n for fl in load_predictions:\n y = pd.read_csv(fl, index_col=0, parse_dates=['time'], infer_datetime_format=True)\n ys.append(y)\n\n # generate new predictions\n if len(run_predictions)>0:\n run_predictions = [(rp, rp.replace(model_path, self.preddir+os.sep).replace('.pkl','.csv')) for rp in run_predictions]\n\n # load feature matrix\n fM,_ = self._extract_features(self.ti_forecast, self.tf_forecast)\n\n # setup predictor\n if self.n_jobs > 1:\n p = Pool(self.n_jobs)\n mapper = p.imap\n else:\n mapper = map\n f = partial(predict_one_model, fM, model_path, pref)\n\n # train models with glorious progress bar\n for i, y in enumerate(mapper(f, run_predictions)):\n cf = (i+1)/len(run_predictions)\n print(f'forecasting: [{\"#\"*round(50*cf)+\"-\"*round(50*(1-cf))}] {100.*cf:.2f}%\\r', end='') \n ys.append(y)\n \n if self.n_jobs > 1:\n p.close()\n p.join()\n \n # condense data frames and write output\n ys = pd.concat(ys, axis=1, sort=False)\n consensus = np.mean([ys[col].values for col in ys.columns if 'pred' in col], axis=0)\n forecast = pd.DataFrame(consensus, columns=['consensus'], index=ys.index)\n forecast.to_csv('{:s}/consensus.csv'.format(self.preddir), index=True, index_label='time')\n \n # memory management\n 
if len(run_predictions)>0:\n del fM\n gc.collect()\n\n return forecast", "def predict(x_tst, model):\n\n predictions = model.predict(x_tst)\n return predictions", "def on_predict_begin(self, logs=None):", "def on_predict_begin(self, logs=None):", "def train_model(self, name):\n if self.data_provider.get_events_count > 10:\n logging.debug(\"Start training model\")\n events = self.data_provider.get_events()\n df = self.data_provider.events_to_dataframe(events)\n\n logging.info('Started parsing configs: {}'.format(df.shape))\n\n local_settings = dict() if self.settings is None else self.settings # type: Dict[str, Any]\n users_filters = local_settings.get('users', {}).get('filters', [])\n events_filters = local_settings.get('events', {}).get('filters', [])\n duplicate_thr_time = local_settings.get('events', {}).get('duplicate_thr_time', 0)\n positive_event_name = local_settings.get('positive_event', {}).get('name', u'passed')\n positive_event_filter = local_settings.get('positive_event', {}).get('filters', {}).get(name, None)\n if positive_event_filter is None:\n return\n negative_event_name = local_settings.get('negative_event', {}).get('name', u'lost')\n\n logging.info('Started DataFrame shape: {}'.format(df.shape))\n\n df = preparing.filter_users(df, users_filters)\n df = preparing.filter_events(df, events_filters)\n df = preparing.drop_duplicated_events(df, duplicate_thr_time)\n df = preparing.add_passed_event(df, positive_event_name, positive_event_filter)\n df = preparing.add_lost_events(df, positive_event_name, negative_event_name)\n\n logging.debug('DataFrame shape after preprocessing: {}'.format(df.shape))\n\n model = Model(df, negative_event_name)\n model.fit_model()\n self.models_container[name] = model", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def prepare_reg_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n\r\n feature_X_user,affect_dataframe, affect_index_dataframe = df, df, df\r\n emo_X_test_dict = {}\r\n affect_index_dict ={}\r\n\r\n for emotion, model_prop in model_dict.items():\r\n #Get the data with the emotion class\r\n if user_keyword == 'validation':\r\n affect_dataframe = dataframe[dataframe['Affect Dimension'] == 1]\r\n affect_index_list = dataframe.index[dataframe['Affect Dimension'] == 1].tolist()\r\n else:\r\n affect_dataframe = dataframe[dataframe[emotion] == 1]\r\n affect_index_list = dataframe.index[dataframe[emotion] == 1].tolist()\r\n test_tweets = affect_dataframe.iloc[:, [0, 1, 2]]\r\n\r\n #Perform preprocessing, feature extraction and transformation for the tweets to be predicted\r\n print(emotion, test_tweets.shape)\r\n if test_tweets.empty == False:\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_r_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n 
print(emotion, 'train-shape', train_vect_df.shape, sep='\\n')\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.DataFrame(pd.concat([test_vect_df, feature_X_user], axis=1)) #####?\r\n emo_X_test_dict[emotion] = X_test\r\n affect_index_dict[emotion] = affect_index_list\r\n else:\r\n emo_X_test_dict[emotion] = pd.DataFrame\r\n affect_index_dict[emotion] = []\r\n\r\n return emo_X_test_dict, affect_index_dict", "def predict(self, model, x_test):\n pass", "def get_predictions(year, month):\n \n start_date = str(year)+\"-\"+str(month)+\"-01\"\n end_date = str(year)+\"-\"+str(month)+\"-\"+str(monthrange(year, month)[1])\n\n date_range = pd.date_range(start_date,end_date, freq='D').strftime(\"%Y-%m-%d\").tolist()\n\n # predictfunction \n # do predictions\n pred_arr = []\n file_name = '../predictions/model_'+str(year)+'_'+str(month)+'.csv'\n \n try:\n predictions = load_predictions(file_name)\n predictions = predictions.round()\n except:\n print(\"An exception occurred\")\n predictions = pd.DataFrame(data = date_range,columns=['Datum'])\n \n \n for index,row in predictions.iterrows():\n \n pred_mail = 0\n pred_counter = 0\n pred_tel = 0\n \n # check predictions dataframe for 'Datum'\n if 'Datum' in predictions.columns:\n date = row['Datum']\n else:\n break;\n\n # check predictions dataframe for 'Mail'\n if 'Mail' in predictions.columns:\n pred_mail = row['Mail']\n\n # check predictions dataframe for 'Schalter'\n if 'Schalter' in predictions.columns:\n pred_counter = row['Schalter']\n\n # check predictions dataframe for 'Tel'\n if 'Tel' in predictions.columns:\n pred_tel = row['Tel']\n \n \n pred_dict = {'date': date, \n 'predictions':{'mail' : pred_mail, \n 'tel' : pred_tel, \n 'counter' : pred_counter\n }\n }\n\n pred_arr.append(pred_dict)\n\n print(pred_arr) \n \n return pred_arr", "def fit(self, series: TimeSeries) -> None:\n series._assert_univariate()\n self.training_series = series\n self.target_series = series\n super().fit()", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def predict(self):\n # format data\n df = self.normalize(self.daily)\n x = df.index.astype(np.int64).values.reshape(-1, 1)\n y = self.normalize(df[['Adj Close']]).values\n\n # format time\n one_day_time = 86400000000000\n x_tomorrow = x[-1] + one_day_time\n x_incl_tomorrow = np.append(x, [x_tomorrow], axis=0)\n dates = pd.to_datetime(x_incl_tomorrow.reshape(-1))\n\n # average the predictions\n lin_reg = self.linear_regression(x, y, x_tomorrow, x_incl_tomorrow, dates)\n knn = self.knn(x, y, x_tomorrow, x_incl_tomorrow, dates)\n tomorrow_norm = [(lin_reg + knn) / 2]\n today_norm = [df['Adj Close'][-1]]\n tomorrow = round((tomorrow_norm[0] * self.daily['Adj Close'][0]), 2)\n today = self.daily['Adj Close'][-1]\n percent_gain = round((((tomorrow / today) - 1) 
* 100), 2)\n percent_gain_int = abs(int(round(percent_gain, 0)))\n\n if percent_gain > 0:\n self.debug += '\\nExpected price gain: {} %, buys + {}, predicted close is {}'.format(percent_gain, percent_gain_int, tomorrow)\n self.buys += percent_gain_int\n else:\n self.debug += '\\nExpected price gain: {} %, sells + {}, predicted close is {}'.format(percent_gain, percent_gain_int, tomorrow)\n self.sells += percent_gain_int\n\n # plots dotted line connecting stock today with tomorrow's prediction\n predicting_line = np.append(today_norm, tomorrow_norm, axis=0)\n\n if self.will_plot:\n self.ax.plot(dates[-2:], predicting_line, color='cyan', dashes=([1, 1, 1, 1]))\n self.ax.plot(pd.to_datetime(x_tomorrow), tomorrow_norm, marker='o', markersize=3, color=\"cyan\")", "def fit_timeseries(xdates, ydata):\n\n pass", "def trainData(self, X, y, NeuralNet, epochs):", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def predict(self, forecastyear, scenario=None):\n\n start_time = time()\n\n results = []\n for m in self.model:\n\n # \"TARGET\" -> ISO-3 country code of destination country\n R = {}\n R['target'] = m['target']\n\n # Forecast year in the input query\n R['year'] = forecastyear\n\n # Scenario in the input query\n R['scenario'] = scenario if scenario else {}\n\n Xv = self.__adjust_features(m['feature'], scenario)\n # logger.info(\"Return adjust features in {:3.2f} sec.\".format(time() - start_time))\n\n R['forecast'] = m['clf'].predict(Xv)[0]\n # logger.info(\"Return forecast predict in {:3.2f} sec.\".format(time() - start_time))\n\n # Explanation of point forecast using SHAP values\n # R['explanation'] = self.__get_explanations(m['explainer'], Xv, m['Xt'].columns)\n\n # Confidence intervals are now one-sided\n # R['CI'] = [0.0, m['CI'].predict(Xv)[0]]\n # logger.info(\"Return CI predict in {:3.2f} sec.\".format(time() - start_time))\n\n results.append(R)\n\n logger.debug(\"Return predict in {:3.2f} sec.\".format(time() - start_time))\n\n return results", "def make_predictions(data_dict):\n new_data = pd.DataFrame(columns=[\"location\", \"date\",\n \"logistic_prediction\",\n \"logistic_logarithmic_prediction\",\n \"logistic_polynomial_prediction\"])\n for country in data_dict.keys():\n data = data_dict[country][\"data\"]\n x_data = np.array(list(range(500)))\n model1 = LogisticRegressionModel(data_dict[country][\"data\"])\n y_pred1 = model1.predict(x_data.reshape(-1, 1))\n model2 = LogisticLogarithmicRegressionModel(data)\n y_pred2 = model2.predict(x_data.reshape(-1, 1))\n model3 = LogisticPolynomialRegressionModel(data)\n y_pred3 = model3.predict(x_data.reshape(-1, 1))\n model4 = PolynomialRegressionModel(data)\n y_pred4 = model4.predict(x_data.reshape(-1, 1))\n data = np.swapaxes(np.array([x_data, y_pred1, y_pred2,\n y_pred3, y_pred4]), 0, 1)\n tmp_data = pd.DataFrame(data, columns=[\"date\",\n \"logistic_prediction\",\n \"logistic_logarithmic_prediction\",\n \"logistic_polynomial_prediction\",\n \"polynomial_prediction\"])\n tmp_data[\"location\"] = country\n new_data = new_data.append(tmp_data)\n return new_data", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def test_predict():\n args = get_layer('predict', 'manual', 'temporal', False, False, window=2, step_size=3)\n 
run_layer(*args)", "def _predict(cls, model, is_log_transformed,\n raw_actual, interpolated_actual,\n training_end=None, seasonal_feature_scoring=None, pred_date=None, order_of_diff=None,\n training_tail=None, ext_training_features=None, pred_len=None, freq=None,\n include_holidays_exog=None):\n\n import numpy as np\n import pandas as pd\n import scipy.stats as st\n from numpy.linalg import LinAlgError\n import math\n\n alpha = cls._sig_level\n alpha_extreme = cls._sig_level_extreme\n\n include_holidays_exog = include_holidays_exog if ext_training_features else 0\n\n index = pd.date_range(start=training_end, end=pred_date, freq=freq)[1:] # Holidays are always daily.\n\n de_obj = DataExploration()\n pred_exog = de_obj._get_exog_data(pred_date, pred_date, index) if include_holidays_exog else None\n\n if pred_exog is not None and set(pred_exog.columns.values) != set(ext_training_features):\n missing_col_list = list(set(ext_training_features) - set(pred_exog.columns.values))\n common_cols = list(set(ext_training_features).intersection(set(pred_exog.columns.values)))\n temp_df = pred_exog[common_cols]\n missing_feat_df = pd.DataFrame(np.zeros([len(pred_exog), len(missing_col_list)]),\n columns=missing_col_list, index=pred_exog.index.values)\n pred_exog = pd.concat([temp_df, missing_feat_df], axis=1)\n pred_exog = pred_exog[ext_training_features]\n\n freq = \"1\" + freq if not any(char.isdigit() for char in freq) else freq\n\n forecast_ndays = int((pred_date - pd.Timestamp(training_end)) / pd.Timedelta(freq))\n model_freshness = forecast_ndays / float(pred_len)\n\n try:\n if forecast_ndays > pred_len:\n raise ValueError('Current trained model object expired')\n\n float_min = 1e-10\n\n # set exogenous (holiday) variables for input data\n if include_holidays_exog:\n pred_exog = pred_exog.loc[pd.Timestamp(training_end) + pd.Timedelta(freq): pred_date]\n else:\n pred_exog = None\n\n if seasonal_feature_scoring:\n if not include_holidays_exog:\n pred_exog = seasonal_feature_scoring[:forecast_ndays]\n else:\n pred_exog['fourier_feature'] = seasonal_feature_scoring[:forecast_ndays]\n\n forecast = list(model.forecast(steps=forecast_ndays, alpha=alpha, exog=pred_exog))\n interpolated_training_data = list(zip(*training_tail))[1]\n\n for order in list(reversed(range(order_of_diff))):\n training_data_diff = np.diff(interpolated_training_data,\n order) if order > 0 else interpolated_training_data\n\n forecast_diff_mean = [training_data_diff[-1]]\n forecast_diff_ci = []\n\n for i in range(forecast_ndays):\n forecast_diff_mean.append(forecast_diff_mean[-1] + forecast[0][i])\n forecast_diff_ci.append([forecast_diff_mean[-1] -\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i]),\n forecast_diff_mean[-1] +\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i])])\n forecast[0] = forecast_diff_mean[1:]\n forecast[2] = forecast_diff_ci\n\n if is_log_transformed:\n transformed_back_forecast = np.exp(forecast[0][-1] + ((forecast[1][-1] ** 2) / 2.0)) - 1\n transformed_back_std_err = np.sqrt((np.exp(forecast[1][-1] ** 2) - 1) * (np.exp((2 * forecast[0][-1]) +\n (forecast[1][\n -1] ** 2))))\n transformed_back_CILower = transformed_back_forecast - \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_CIUpper = transformed_back_forecast + \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_interpolated_actual = 
float(np.exp(interpolated_actual) - 1)\n if np.sum(np.isnan(forecast[0][-1])) or np.isnan(forecast[1][-1]):\n raise ValueError('Predicted null value')\n\n if is_log_transformed:\n zscore = (transformed_back_interpolated_actual -\n transformed_back_forecast) / max(float(transformed_back_std_err), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(transformed_back_CILower) \\\n or math.isnan(transformed_back_CIUpper):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN under log transform')\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'LogTransformedAdjustedActual': interpolated_actual,\n 'LogTransformedPrediction': float(forecast[0][-1]),\n 'LogTransformedStdErr': float(forecast[1][-1]),\n 'LogTransformedCILower': float(forecast[2][-1][0]),\n 'LogTransformedCIUpper': float(forecast[2][-1][1]),\n 'AdjustedActual': transformed_back_interpolated_actual,\n 'Prediction': float(transformed_back_forecast) if not float(\n transformed_back_forecast) == float('inf') else 0.0,\n 'StdErr': float(transformed_back_std_err) if not float(\n transformed_back_std_err) == float('inf') else 0.0,\n 'CILower': float(transformed_back_CILower) if not float(\n transformed_back_CILower) == float('-inf') else 0.0,\n 'CIUpper': float(transformed_back_CIUpper) if not float(\n transformed_back_CIUpper) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n else:\n zscore = (interpolated_actual - forecast[0][-1]) / max(float(forecast[1][-1]), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(forecast[2][-1][0]) or math.isnan(forecast[2][-1][1]):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN')\n\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'AdjustedActual': interpolated_actual,\n 'Prediction': float(forecast[0][-1]) if not float(\n forecast[0][-1]) == float('inf') else 0.0,\n 'StdErr': float(forecast[1][-1]) if not float(\n forecast[1][-1]) == float('inf') else 0.0,\n 'CILower': float(forecast[2][-1][0]) if not float(\n forecast[2][-1][0]) == float('-inf') else 0.0,\n 'CIUpper': float(forecast[2][-1][1]) if not float(\n forecast[2][-1][1]) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n 
except (LinAlgError, ValueError, LADStructuralError) as e:\n result = {'Success': False,\n 'AdjustedActual': interpolated_actual,\n 'ErrorMessage': str(e)}\n\n return result", "def preprocess(self, X):\n X = X.copy()\n predictor_subset = self.predictor_subset.copy()\n if 'all' in predictor_subset:\n predictor_subset = add_all_predictors(predictor_subset, X.columns)\n \n use_temporal = 'temporal' in predictor_subset\n if use_temporal:\n X_temporal = get_temporal_predictors(\n X['TIMESTAMP_END']\n )\n predictor_subset.remove('temporal')\n\n X = X[predictor_subset]\n\n if use_temporal:\n X = pd.concat([X, X_temporal], axis=1)\n\n if 'WD' in predictor_subset:\n X = process_wind_direction_predictor(X)\n\n return X", "def fit(self, train):\n model = ARIMA(train, order=(self.p, self.d, self.q))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n nextDaysPred = output[0][0]\n return nextDaysPred", "def get_prediction(amazon_id, df):\n\n # convert values to pd DataFrame\n df, predict_window = process_data(df)\n\n #check if model for the product exists and use it, otherwise create new one\n if(not os.path.exists(get_model_path(amazon_id))):\n\n #process data\n dataset_train, dataset_test = split_dataset(df, predict_window)\n\n #transform data\n dataset_train, dataset_test = transform_data(dataset_train, dataset_test)\n\n #scale data\n x_train, y_train, x_test, y_test = reshape_datasets(dataset_train, dataset_test, predict_window)\n\n #create model\n model = get_model(x_train)\n\n #fit data \n model.fit(x_train, y_train, epochs=500, batch_size=512)\n\n #save model \n model.save(get_model_path(amazon_id))\n\n #clear the session\n Clear.clear_session()\n \n if os.path.exists(get_model_path(amazon_id)):\n\n #upload model\n model = load_model(get_model_path(amazon_id)) \n\n #run scaler\n scaler.fit(df)\n\n #get inputs for prediction\n inputs = df[len(df) - predict_window:]\n\n #transform inputs\n inputs = scaler.transform(inputs)\n\n # Slide the window forward by one, so the last predicted value now becomes the head of \n # the new window and predict the next, slide again, and so on\n for i in range(predict_window):\n x_predict = []\n\n x_predict.append(inputs[i:i + predict_window,0])\n x_predict = np.array(x_predict)\n x_predict = np.reshape(x_predict, (x_predict.shape[0], x_predict.shape[1],1))\n nextPrice = model.predict(x_predict)\n \n inputs = np.append(inputs, nextPrice, axis=0)\n\n #inverse transformation we did\n predictions = scaler.inverse_transform(inputs[predict_window:])\n\n #convert numpy array to python list\n python_list = get_python_list(predictions)\n\n #clear the session after creating or loading model\n Clear.clear_session()\n\n return python_list", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def ml_pred_post(ticker=None, stock_last_day=None, train_pred_df=None, val_pred_df=None, test_pred_df=None, \nX_features=None, y_features=None, n_past_days=90, n_future_days=5, loss='mean_squared_error', lr=0.01, epochs=12, batch_size=32, RMSE_train_pred=None, RMSE_val_pred=None):\n\n try:\n train_pred_df_dict = train_pred_df.to_dict(orient='records')\n val_pred_df_dict = val_pred_df.to_dict(orient='records')\n test_pred_df_dict = test_pred_df.to_dict(orient='records')\n \n n_X_features = len(X_features)\n n_y_features = len(y_features)\n\n pred_post_to_upload = {\n 'Stock': ticker,\n 'Datetime': 
stock_last_day,\n 'X_features': X_features,\n 'y_features': y_features,\n 'n_past_days': n_past_days,\n 'n_future_days': n_future_days,\n 'ML Model': {\n 'model': 'LSTM',\n 'parameters': {\n 'layers': {'LSTM_1 units': 64,\n 'LSTM_1 input_shape': (n_past_days, n_X_features),\n 'LSTM_2 units': 32,\n 'Dropout': 0.2,\n 'Dense units': n_future_days*n_y_features\n },\n 'compile': {\n 'optimizer': 'Adam',\n 'loss': str(loss),\n 'lr': lr\n },\n 'fit': {\n 'epochs': epochs,\n 'batch_size': batch_size\n } \n },\n\n },\n 'RMSE_train_pred': float(RMSE_train_pred),\n 'RMSE_val_pred': float(RMSE_val_pred),\n 'train_pred': train_pred_df_dict,\n 'val_pred': val_pred_df_dict,\n 'test_pred': test_pred_df_dict\n }\n return pred_post_to_upload\n\n except Exception as e:\n print('Something went wrong...')\n print(f'str(e) = {str(e)}')\n # print(f'repr(e) = {repr(e)}')\n\n # return pred_post_to_upload", "def train(self):\n for data_tier in self.data_tiers:\n fd = open(self.data_path + '/training_data_' + data_tier + '.json', 'r')\n self.preprocessed_data[data_tier] = json.load(fd)\n fd.close()\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.ceil(tot*0.8))\n training_features = np.array(self.preprocessed_data[data_tier]['features'][:p])\n trend_training_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][:p])\n avg_training_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][:p])\n t1 = datetime.datetime.utcnow()\n self.clf_trend[data_tier].fit(training_features, trend_training_classifications)\n self.clf_avg[data_tier].fit(training_features, avg_training_classifications)\n t2 = datetime.datetime.utcnow()\n td = t2 - t1\n self.logger.info('Training %s for data tier %s took %s', self.name, data_tier, str(td))\n joblib.dump(self.clf_trend[data_tier], self.data_path + '/' + self.name + '_trend_' + data_tier + '.pkl')\n joblib.dump(self.clf_avg[data_tier], self.data_path + '/' + self.name + '_avg_' + data_tier + '.pkl')", "def eval_model(config, period, test_data):\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n elif config.network == 'LSTM':\n model = VanillaLSTM(config)\n elif config.network == 'CNN':\n model = CNNfeature(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n\n if config.ensemble:\n m = model\n model = []\n\n for i in glob(gen_path(config.path, str(period)) + '/m*'):\n m.load_state_dict(\n torch.load(gen_path(i, filename=config.network + '.pkl')))\n m.to(config.device)\n m.eval()\n model.append(m)\n else:\n model.load_state_dict(\n torch.load(gen_path(config.path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n dataloader_test = test_data[0]\n test_date = test_data[1]\n test_symbol = test_data[2]\n sc_y = joblib.load(gen_path(config.path, str(period), 'scaler', filename='training_sc_y.pkl'))\n predict_y_test, real_y_test, valid_index_test = make_prediction(dataloader_test, sc_y, model, config)\n\n stock_score = pd.DataFrame()\n stock_score[\"symbol\"] = test_symbol[valid_index_test]\n stock_score[\"score\"] = predict_y_test\n stock_score['truth'] = real_y_test\n stock_score[\"date\"] = test_date[valid_index_test]\n stock_score = stock_score.sort_values(by=[\"date\"])\n stock_score.to_csv(gen_path(config.path, 'stock_score', filename=str(period) + '.csv'), index=False)", "def train(self, training_data):\n pass", "def train(self, data):\n pass", "def 
train(model, X_train, X_val, Y_train, Y_val, callbacks, datagen, batch_size, epochs):\n \n print(\"[INFO] Training model...\")\n print(\"[INFO[ Training data shape: {0}\".format(X_train.shape))\n\n before = time.time()\n\n history = model.fit_generator(\n datagen.flow(X_train, Y_train, batch_size=batch_size),\n steps_per_epoch=len(X_train)/batch_size, # entire training set is used per epoch\n epochs=epochs,\n verbose=1,\n validation_data=(X_val, Y_val),\n callbacks=callbacks)\n\n after = time.time()\n time_elapsed = after - before / 60 # return time elapsed in minutes\n print(\"[INFO] Training time elapsed: {:.2f}mins\".format(time_elapsed))\n\n return history", "def make_forecast(chain, train_data, len_forecast: int, max_window_size: int):\n\n # Here we define which task should we use, here we also define two main\n # hyperparameters: forecast_length and max_window_size\n task = Task(TaskTypesEnum.ts_forecasting,\n TsForecastingParams(forecast_length=len_forecast,\n max_window_size=max_window_size,\n return_all_steps=False,\n make_future_prediction=True))\n\n # Prepare data to train the model\n train_input = InputData(idx=np.arange(0, len(train_data)),\n features=None,\n target=train_data,\n task=task,\n data_type=DataTypesEnum.ts)\n\n # Make a \"blank\", here we need just help FEDOT understand that the\n # forecast should be made exactly the \"len_forecast\" length\n predict_input = InputData(idx=np.arange(0, len_forecast),\n features=None,\n target=None,\n task=task,\n data_type=DataTypesEnum.ts)\n\n available_model_types_primary = ['linear', 'ridge', 'lasso',\n 'dtreg', 'knnreg']\n\n available_model_types_secondary = ['linear', 'ridge', 'lasso', 'rfr',\n 'dtreg', 'knnreg', 'svr']\n\n composer_requirements = GPComposerRequirements(\n primary=available_model_types_primary,\n secondary=available_model_types_secondary, max_arity=5,\n max_depth=3, pop_size=10, num_of_generations=12,\n crossover_prob=0.8, mutation_prob=0.8,\n max_lead_time=datetime.timedelta(minutes=5),\n add_single_model_chains=True)\n\n metric_function = MetricsRepository().metric_by_id(\n RegressionMetricsEnum.RMSE)\n builder = GPComposerBuilder(task=task).with_requirements(\n composer_requirements).with_metrics(metric_function).with_initial_chain(\n chain)\n composer = builder.build()\n\n obtained_chain = composer.compose_chain(data=train_input,\n is_visualise=False)\n obtained_chain.__class__ = TsForecastingChain\n\n print('Obtained chain')\n obtained_models = []\n for node in obtained_chain.nodes:\n print(str(node))\n obtained_models.append(str(node))\n depth = int(obtained_chain.depth)\n print(f'Глубина цепочки {depth}')\n\n # Fit it\n obtained_chain.fit_from_scratch(train_input)\n\n # Predict\n predicted_values = obtained_chain.forecast(initial_data=train_input,\n supplementary_data=predict_input).predict\n\n return predicted_values, obtained_models, depth", "def run_training(\n self, model_data: RasaModelData, label_ids: Optional[np.ndarray] = None\n ) -> None:\n if not self.finetune_mode:\n # This means the model wasn't loaded from a\n # previously trained model and hence needs\n # to be instantiated.\n self.model = self.model_class()(\n model_data.get_signature(),\n self.config,\n isinstance(self.featurizer, MaxHistoryTrackerFeaturizer),\n self._label_data,\n self._entity_tag_specs,\n )\n self.model.compile(\n optimizer=tf.keras.optimizers.Adam(self.config[LEARNING_RATE])\n )\n (\n data_generator,\n validation_data_generator,\n ) = rasa.utils.train_utils.create_data_generators(\n model_data,\n 
self.config[BATCH_SIZES],\n self.config[EPOCHS],\n self.config[BATCH_STRATEGY],\n self.config[EVAL_NUM_EXAMPLES],\n self.config[RANDOM_SEED],\n )\n callbacks = rasa.utils.train_utils.create_common_callbacks(\n self.config[EPOCHS],\n self.config[TENSORBOARD_LOG_DIR],\n self.config[TENSORBOARD_LOG_LEVEL],\n self.tmp_checkpoint_dir,\n )\n\n if self.model is None:\n raise ModelNotFound(\"No model was detected prior to training.\")\n\n self.model.fit(\n data_generator,\n epochs=self.config[EPOCHS],\n validation_data=validation_data_generator,\n validation_freq=self.config[EVAL_NUM_EPOCHS],\n callbacks=callbacks,\n verbose=False,\n shuffle=False, # we use custom shuffle inside data generator\n )", "def __init__(self, pandas_dataframe, dates_column, target_column, regressors=None, train_test_split=0.66, seed=7,\n look_back=1, look_forward=1, interval=0):\n data = pd.DataFrame(index=pandas_dataframe[dates_column].values, data=pandas_dataframe[target_column].values)\n # Calculate the training set size\n train_size = int(len(data)*train_test_split)\n # Scale the data pre-train/test split\n scaler = MinMaxScaler(feature_range=(0, 1))\n self.scaler = scaler\n data = scaler.fit_transform(data)\n # Get the time series as stationary (for the given interval, if 0 don't make it a series of 0)\n if interval > 0:\n data = difference(data, interval)\n # Map the series to a supervised problem (values for days 1-n with regressors for these days to predict days\n # n + 1 ... n + k\n x, y = timeseries_to_supervised(data, look_back=look_back, look_forward=look_forward)\n # Split train and test\n self.x_train, self.y_train = x[:train_size], y[:train_size]\n self.x_test, self.y_test = x[train_size:], y[train_size:]\n # Use regressors if required\n if regressors is not None:\n self.x_train, self.x_test = add_regressors(self.x_train, self.x_test, regressors, pandas_dataframe,\n dates_column, look_forward, look_back)\n # Set last attributes\n self.seed = seed\n self.look_back = look_back\n self.look_forward = look_forward\n self.regressors = regressors", "def tune_model(self):\n model = model_from_json(open(self.config.model_name).read())\n model.compile(loss='mse', optimizer='adam')\n model.load_weights(self.config.save_path)\n history = self.model.fit_generator(generator=self.train, \n samples_per_epoch=self.train_len, nb_epoch=self.config.epochs,)\n self.model.save_weights(self.config.save_path)\n return history", "def walkforward_validation(data, test_start_date, test_end_date=None, step_size=15, testsize=15, model='SARIMA'):\n test_start_date = pd.to_datetime(test_start_date)\n current_max_date = test_start_date\n\n modelling_results = pd.DataFrame(columns=['series_name', 'model_type', 'test_start', 'test_end', 'MAE', 'MAPE', 'RMSE'])\n\n if test_end_date is None:\n test_end_date = data.index.max()\n test_end_date = pd.to_datetime(test_end_date)\n else:\n test_end_date = pd.to_datetime(test_end_date)\n\n while current_max_date < test_end_date:\n data.index = pd.to_datetime(data.index)\n iter_data = data[data.index <= current_max_date + timedelta(days=testsize)]\n test, train = test_train_spl(iter_data, testsize=testsize)\n\n if (model.upper() == 'SARIMA') | (model.upper() == 'SARIMAX'):\n print('USING SARIMA MODEL')\n mae, rmse, mape, name, preds, conf_intervals = mod_sarima(train=train, test=test, **arima_model_params)\n elif model.upper() == 'PROPHET':\n print('USING PROPHET MODEL')\n mae, rmse, mape, name, preds, conf_intervals = mod_prophet(train=train, test=test, **prophet_model_params)\n else:\n print('model 
name not known')\n iter_results = pd.DataFrame({'series_name': name, 'model_type': model, 'test_start': [current_max_date],\n 'test_end': [current_max_date + timedelta(testsize)], 'MAE': [mae], 'MAPE': [mape], 'RMSE': [rmse]})\n modelling_results = modelling_results.append(iter_results, ignore_index=True)\n\n # this line is just for validation of the effect of regressors in the forecast\n preds.to_csv(mod_report_path + arima_model_params['name'] + 'forecast_' + str(current_max_date).replace(':', '')+ '.csv')\n\n current_max_date = current_max_date + timedelta(days=step_size)\n\n return modelling_results", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def predict(self, instances):\r\n raise NotImplementedError", "def init_model(n_factors, n_dates, n_tickers):\n date = tf.keras.Input((1,), name=\"date\", dtype=\"int32\")\n ticker = tf.keras.Input((1,), name=\"ticker\", dtype=\"int32\")\n\n # learnable table of date -> factor returns\n date_embedded = tf.keras.layers.Embedding(\n n_dates, n_factors, name=\"date_embedding\"\n )(date)\n\n # learnable table of ticker -> factor loadings\n ticker_embedded = tf.keras.layers.Embedding(\n n_tickers, n_factors, name=\"ticker_embedding\"\n )(ticker)\n\n pred = tf.keras.layers.Reshape((1,))(\n tf.keras.layers.Dot(axes=-1)([date_embedded, ticker_embedded])\n )\n\n model = tf.keras.Model(inputs=[date, ticker], outputs=pred)\n model.compile(\"Adagrad\", \"mse\")\n return model", "def predict(model, X_testing):\n predictions = model.predict(X_testing)\n\n return predictions", "def _forecast(model, predict_steps):\n\n pred_uc = model.get_forecast(steps=predict_steps)\n\n # Produce the forecasted tables\n predicted_mean_df = pred_uc.predicted_mean.reset_index()\n predicted_mean_df.columns = ['Date', 'Predicted_Mean']\n return predicted_mean_df", "def train_predictors(market_data, functions_for_typical_price_data, functions_for_hlc_price_data, labels_for_typical_price_data, labels_for_hlc_price_data):\r\n standard = {}\r\n # high = market_data.loc[:, 'high'].values.tolist()\r\n # low = market_data.loc[:, 'low'].values.tolist()\r\n # close = market_data.loc[:, 'close'].values.tolist()\r\n volume = market_data.loc[:, 'volume'].values\r\n # typical_prices = typical_price(high, low, close)\r\n typical_prices = market_data.loc[:, 'weightedAverage'].values\r\n standard['volume'] = (np.nanmean(volume), np.nanstd(volume))\r\n standard['typical_price'] = (np.nanmean(typical_prices), np.nanstd(typical_prices))\r\n x = ((volume - standard['volume'][0])/standard['volume'][1])\r\n x = np.c_[(typical_prices - standard['typical_price'][0])/standard['typical_price'][1], x]\r\n typical_prices = typical_prices.tolist()\r\n for f, label in zip(functions_for_typical_price_data, labels_for_typical_price_data):\r\n values = np.array(f(typical_prices))\r\n standard[label] = (np.nanmean(values), np.nanstd(values))\r\n x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n # for f, label in zip(functions_for_hlc_price_data, labels_for_hlc_price_data):\r\n # values = np.array(f(high, low, close))\r\n # if 'typical_price' in label and label != 'typical_price':\r\n # standard[label] = standard['typical_price']\r\n # else:\r\n # standard[label] = (np.nanmean(values), np.nanstd(values))\r\n # x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n return pd.DataFrame(data=x, columns=['typical_price', 'volume']+labels_for_typical_price_data, index=market_data.index), standard", "def fit_model(x_train, y_train, model=None):\n if not model:\n 
model = create_general_lstm_model(x_train.shape[1], x_train.shape[2])\n\n history = model.fit(\n x_train,\n y_train,\n epochs=100,\n batch_size=64,\n validation_split=0.1,\n callbacks=[\n keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10, mode=\"min\")\n ],\n shuffle=False,\n )\n\n return model, history", "def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)" ]
[ "0.6817948", "0.66349447", "0.6473964", "0.6437722", "0.6398785", "0.63897544", "0.6377936", "0.6365075", "0.6336138", "0.62935156", "0.62524164", "0.6220017", "0.6206941", "0.6185454", "0.61810833", "0.6175819", "0.6175819", "0.6175819", "0.6175819", "0.6175819", "0.61737525", "0.61731225", "0.6172217", "0.61644065", "0.6152767", "0.6138689", "0.61055976", "0.60816693", "0.6074132", "0.6069845", "0.6062239", "0.6048742", "0.60471714", "0.6030376", "0.60171455", "0.59959596", "0.5979451", "0.59734523", "0.5968748", "0.59682274", "0.59667146", "0.5965759", "0.5942116", "0.59347516", "0.59309536", "0.5929371", "0.5928695", "0.5925541", "0.59250265", "0.5912921", "0.5874513", "0.58725315", "0.58625597", "0.58559895", "0.58553386", "0.58453465", "0.5843418", "0.5837906", "0.58265626", "0.581741", "0.5815897", "0.5815897", "0.58158684", "0.58144367", "0.5813008", "0.5812886", "0.5806833", "0.58013517", "0.57980645", "0.5796125", "0.57881564", "0.57754844", "0.5774137", "0.577198", "0.5763709", "0.5755757", "0.57545686", "0.5754119", "0.5742697", "0.574103", "0.5740654", "0.57354116", "0.57345104", "0.5731678", "0.5728249", "0.5726301", "0.57243955", "0.5719671", "0.5717539", "0.5715979", "0.5714908", "0.571153", "0.57077795", "0.5704223", "0.570225", "0.5699848", "0.569932", "0.5698867", "0.56962013", "0.56958157", "0.56932753" ]
0.0
-1
Visualize a particular column of Y_pred and Y_test for a particular series
def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):
    
    
    ser_idx = [i for i in range(0, len(y_test), num_win_ser)]
    if num_plots > len(ser_idx):
        print("Too many plots, reduce the number")
    else:
        indx = ser_idx[0:num_plots]
        days = range(num_win_ser)
        for idx in indx:
            CR = test_seq[idx][0][0][3]
            pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]
            true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]
            
            plt.title("Y_True V/S Y_Pred, CR: "+ str(CR))
            plt.xlabel('Days')
            plt.ylabel(cols_y[col_idx])
            
            plt.plot(days, pred, label = 'Pred')
            plt.plot(days, true, label = 'True')
            
            plt.legend()
            plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actual_pred_plot(preds):\r\n actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])\r\n actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]\r\n actual_pred['prediction'] = preds[:, -1]\r\n\r\n from keras.metrics import MeanSquaredError\r\n m = MeanSquaredError()\r\n m.update_state(np.array(actual_pred['Cost']), np.array(actual_pred['prediction']))\r\n\r\n return m.result().numpy(), actual_pred.plot()", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = x_test[idx][0][3]\n #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n #plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()", "def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()", "def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig", "def test_sarima_model(y, y_test, results, **kwargs):\n \n # Get predictions\n pred = results.get_prediction(start=y_test.index.min(), end=y_test.index.max(), **kwargs)\n y_pred = pred.predicted_mean\n pred_ci = pred.conf_int()\n\n # Calculate some metrics and print them out\n rmse = ((y_pred - y_test) ** 2).mean() ** 0.5\n print('Root Mean Squared Error =', rmse)\n \n r2 = r2_score(y_pred, y_test)\n print('R^2 =', r2)\n \n # Graph\n ax = y.plot(label='observed')\n y_pred.plot(ax=ax, label='predicted', alpha=.7, figsize=(15, 8))\n ax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n plt.title('Average Monthly Temperature: Observed vs. 
Predicted')\n ax.set_xlabel('Date')\n ax.set_ylabel('Temperature')\n plt.legend()\n plt.show()", "def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()", "def visualize_test(test_data_full, test_data, thetas):\n fig, ax = plt.subplots()\n ax.scatter(test_data_full[\"Weight\"], test_data_full[\"Height\"], color='blue')\n ax.plot(test_data_full[\"Weight\"], predict(test_data, thetas[-1]), color='red', linewidth=2)\n return fig", "def plot_pred(y, yhat, name, output_dir):\n ax = pd.DataFrame(y, columns=[\"y%s\" % LOOK_AHEAD]).plot(figsize=(15, 10))\n pd.DataFrame(yhat, columns=[\"yhat%s\" % LOOK_AHEAD]).plot(ax=ax)\n plt.title(\"%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}.png\")\n\n pd.DataFrame(y-yhat, columns=[f\"yhat {LOOK_AHEAD}\"]).plot(figsize=(15, 10))\n plt.title(\"diff-%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}-diff.png\")", "def plot_scatter(df):\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values\": fig})\n\n # Poor Results\n df = df.query(\"mae > 2\")\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values [mae > 2]\": fig})", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def plot_results(actual_time_series, predicted_values, len_train_data,\n y_name='Parameter'):\n\n plt.plot(np.arange(0, len(actual_time_series)),\n actual_time_series, label='Actual values', c='green')\n plt.plot(np.arange(len_train_data, len_train_data + len(predicted_values)),\n predicted_values, label='Predicted', c='blue')\n # Plot black line which divide our array into train and test\n plt.plot([len_train_data, len_train_data],\n [min(actual_time_series), max(actual_time_series)], c='black',\n linewidth=1)\n plt.ylabel(y_name, fontsize=15)\n plt.xlabel('Time index', fontsize=15)\n plt.legend(fontsize=15)\n plt.grid()\n plt.show()", "def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == 
y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. 
Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))", "def _graph_results(self, X_test, y_test, y_pred):\n if self.regression is None:\n print(\"Regression results aren't available. Have you run linear_regression() yet?\")\n return\n\n if self.attributes.shape[1] > 1:\n print(\"Graphing is supported for one feature only.\")\n return\n\n plt.scatter(X_test, y_test, color=\"black\")\n plt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n plt.xticks(())\n plt.yticks(())\n plt.show()", "def evaluate_random_forest(y_test, y_pred):", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)", "def rmse(y_true, y_pred): # -> Any:\n ...", "def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass", "def _plot_good_pred_whitout_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1]):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if test.labels[idx] == self.preds[idx, idx_preds]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n misclassified = [idx for idx in range(self.preds.shape[0]) if idx not in goodclassified_index]\r\n if misclassified:\r\n ax.scatter(test.features[misclassified, 0], test.features[misclassified, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def 
_plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")", "def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction", "def plot_stats(x_axis, y_axis, df, highlight=[]):\n a, b = df[x_axis], df[y_axis]\n\n X_train, X_test, y_train, y_test = train_test_split(a, b, test_size=0.33, random_state=42)\n\n X_train = np.array(X_train).reshape(-1, 1)\n X_test = np.array(X_test).reshape(-1, 1)\n y_train = np.array(y_train).reshape(-1, 1)\n y_test = np.array(y_test).reshape(-1, 1)\n\n regr = linear_model.LinearRegression()\n\n regr.fit(X_train, y_train)\n\n df[y_axis + \" STD\"] = df[y_axis].apply(lambda a: round((a-df[y_axis].mean())/df[y_axis].std()))\n df[y_axis + \" rank\"] = df[y_axis].rank(ascending=False)\n df[x_axis + \" rank\"] = df[x_axis].rank(ascending=False)\n \n mapper = linear_cmap(field_name=y_axis + \" STD\", palette=brewer[\"RdBu\"][len(df[y_axis + \" STD\"].unique())], \n low=min(df[y_axis + \" STD\"].unique()), high=max(df[y_axis + \" STD\"].unique()))\n \n source = ColumnDataSource(df)\n source2 = ColumnDataSource(df[df[\"Player\"].isin(highlight)])\n \n p = figure(x_range=(df[x_axis].min() - df[x_axis].std(), df[x_axis].max() + df[x_axis].std()), \n y_range=(df[y_axis].min() - df[y_axis].std(), df[y_axis].max() + df[y_axis].std()))\n \n r1 = p.circle(x=x_axis, y=y_axis,\n source=source, size=10, color=mapper, line_color=\"black\", legend_group= y_axis + \" STD\")\n\n p.title.text = y_axis + \" vs. \" + x_axis\n p.title.align = \"center\"\n p.xaxis.axis_label = x_axis\n p.yaxis.axis_label = y_axis\n p.legend.location = 'top_left'\n p.legend.title = \"St. 
Dev's from Avg \" + y_axis\n p.background_fill_color = \"#dddddd\"\n p.background_fill_alpha = 0.1\n \n line_x = [df[x_axis].min().item() - df[x_axis].std().item(), df[x_axis].max().item() + df[x_axis].std().item()]\n line_y = [(line_x[0]*regr.coef_.item()) + regr.intercept_.item(), (line_x[1]*regr.coef_.item()) + regr.intercept_.item()]\n r2 = p.line(line_x, line_y, line_width=2, color=\"black\")\n\n p.add_tools(HoverTool(renderers=[r1], tooltips=[\n (\"Player\", \"@Player\"),\n (y_axis, \"@{\" + y_axis +\"}{0.000}\"),\n (y_axis + \" Rank\", \"#@{\" + y_axis + \" rank}\"),\n (x_axis, \"@{\" + x_axis +\"}{0}\"),\n (x_axis + \" Rank\", \"#@{\" + x_axis + \" rank}\")]))\n\n \n p.add_tools(HoverTool(renderers=[r2], \n tooltips=[(x_axis, \"$x{0000}\"),\n (\"Predicted \" + y_axis, \"$y\")]))\n \n labels = LabelSet(x=x_axis, \n y=y_axis, text=\"Player\", y_offset=8,\n text_font_size=\"11px\", text_color=\"#555555\",\n source=source2, text_align='center')\n \n p.add_layout(labels)\n\n st.bokeh_chart(p)", "def test(model, X_test, y_test):\n pred, loss = model(X_test, y_test)\n test_pred = np.argmax(pred, axis=1) \n acc = np.mean(np.argwhere(y_test==1)[:,1]==test_pred) \n\n print(\"Test acc is:\\n\", acc) \n return test\n raise NotImplementedError(\"Test method not implemented\")", "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c", "def plot_prediction(test_YY, predict_age_month):\n\n\t# PLot-actual vs predicted age from test image\n\tfig, ax = plt.subplots(figsize = (7,7))\n\n\tplt.plot(test_YY, predict_age_month, 'ro')\n\n\tax.plot(test_YY, predict_age_month, 'r.',\n\t\t\t\t\tlabel = 'predictions (xception)-test image')\n\n\tax.plot(test_YY, test_YY, 'b-',\n\t\t\t\t\t\t\t\tlabel = 'actual-test image')\n\n\tax.legend(loc = 'upper right')\n\tax.set_xlabel('Actual Age (Months)')\n\tax.set_ylabel('Predicted Age (Months)')\n\tplt.show()", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def evaluate(model, df_result, label='test'):\n\n y_true = df_result['RUL']\n y_hat = df_result['y_hat']\n df_result['breakdown'].replace(0, False, inplace=True) # rsf only takes true or false\n df_result['breakdown'].replace(1, True, inplace=True) # rsf only takes true or false\n\n mse = mean_squared_error(y_true, y_hat)\n rmse = np.sqrt(mse)\n variance = r2_score(y_true, y_hat)\n\n # the concordance index (CI) is interested on the order of the predictions, not the predictions themselves\n # CI can only be measured between individual samples where a censoring or failure event occurred\n # https://medium.com/analytics-vidhya/concordance-index-72298c11eac7#:~:text=The%20concordance%20index%20or%20c,this%20definition%20mean%20in%20practice\n df_result_grouped = df_result.groupby('unit num').last()\n breakdown = 
df_result_grouped['breakdown']\n y_true = df_result_grouped['RUL']\n y_hat = df_result_grouped['y_hat']\n ci_sk = ci_scikit(breakdown, y_true, y_hat)[0]\n score = nasaScore(y_true, y_hat) # score should be based on the last instance\n # print(f'Number of concordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[1]}')\n # print(f'Number of discordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[2]}')\n # print(f'Number of pairs having tied estimated risks (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[3]}')\n # print(f'Number of comparable pairs sharing the same time (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[4]}')\n print('{} set RMSE:{:.2f}, Score:{:.2f}, CI(scikit):{:.4f}, R2:{:.2f}'.format(label, rmse, score, ci_sk, variance))\n result = [model, label, rmse, score, ci_sk, variance]\n return result", "def residual_vs_actual(\n y_true: ArrayLike | str,\n y_pred: ArrayLike | str,\n df: pd.DataFrame | None = None,\n ax: plt.Axes | None = None,\n xlabel: str = r\"Actual value\",\n ylabel: str = r\"Residual ($y_\\mathrm{true} - y_\\mathrm{pred}$)\",\n **kwargs: Any,\n) -> plt.Axes:\n y_true, y_pred = df_to_arrays(df, y_true, y_pred)\n assert isinstance(y_true, np.ndarray)\n assert isinstance(y_pred, np.ndarray)\n ax = ax or plt.gca()\n\n y_err = y_true - y_pred\n\n ax.plot(y_true, y_err, \"o\", alpha=0.5, label=None, mew=1.2, ms=5.2, **kwargs)\n ax.axline(\n [1, 0], [2, 0], linestyle=\"dashed\", color=\"black\", alpha=0.5, label=\"ideal\"\n )\n\n ax.set(xlabel=xlabel, ylabel=ylabel)\n ax.legend(loc=\"lower right\")\n\n return ax", "def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n return compute_rmse(y_pred, y_test)", "def score(self, y_true, y_pred):\r\n pass", "def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))", "def make_predictions(model, x_test, y_test):\r\n preds = model.predict(x_test)\r\n y_hat = np.argmax(preds, axis=-1)\r\n print(type(y_test))\r\n y_test.columns = [0, 1]\r\n y = y_test.idxmax(axis=1)\r\n print(y_hat.shape)\r\n print(y.shape)\r\n return y_hat, y", "def check_model_performances(X,Y, model,show=False):\n #model.fit(X, Y)\n predictions = model.predict(X)\n \n predictions = predictions#.reshape(-1,1)\n \n # ######## Computes MSE ####### \n MSE = mean_squared_error(Y, predictions)\n print(f'\\nMSE : {MSE}')\n \n # ######## Computes R2 ####### \n R2 = r2_score(Y, predictions)\n print(f'R2 : {R2}')\n \n # ######## Plot Model predictions vs. 
target ####### \n if show:\n fig = go.Figure()\n \n fig.add_trace(go.Scatter(y=Y,\n mode='lines',\n name='target'))\n fig.add_trace(go.Scatter(y=predictions\n ,\n mode='lines',\n name='predictions'))\n \n fig.show()", "def tpr(y_true, y_pred):\n return recall(y_true, y_pred)", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n\n for i, column in enumerate(category_names):\n y_true = Y_test.values[:, i]\n y_pred = Y_pred[:, i]\n target_names = ['not {}'.format(column), '{}'.format(column)]\n print(classification_report(\n y_true, y_pred, target_names=target_names))", "def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:", "def score(y_values):\n y_act = y_values[:,0]\n y_pred = y_values[:,1]\n return (y_act==y_pred).mean()*100", "def plot_results(self, predictions: list):\n fig, ax = plt.subplots()\n cm = confusion_matrix(self.test[1], predictions)\n conf = confusion_matrix(self.test[1], predictions).ravel()\n nbr_labels = len(set(self.test[1]))\n cm = conf.reshape(nbr_labels, nbr_labels)\n sns.heatmap(cm, annot=True, fmt=\"d\", cmap=\"Spectral\")\n ax.set_xlabel(\"predicted label\")\n ax.set_ylabel(\"true label\")\n fig.savefig(\"confusion_matrix\")\n\n fig, ax = plt.subplots()\n x = self.train[0] + self.test[0]\n y = self.train[1] + self.test[1]\n x = [i[0] for i in x]\n y = [i for i in y]\n results = pd.DataFrame({\"polarity strength\": x, \"true label\": y})\n sns.boxplot(data=results, x=\"true label\", y=\"polarity strength\")\n fig.savefig(\"boxplot\")", "def eva_regress(y_true, y_pred):\n\n mape = MAPE(y_true, y_pred)\n vs = metrics.explained_variance_score(y_true, y_pred)\n mae = metrics.mean_absolute_error(y_true, y_pred)\n mse = metrics.mean_squared_error(y_true, y_pred)\n r2 = metrics.r2_score(y_true, y_pred)\n print('explained_variance_score:%f' % vs)\n print('mape:%f%%' % mape)\n print('mae:%f' % mae)\n print('mse:%f' % mse)\n print('rmse:%f' % np.sqrt(mse))\n print('r2:%f' % r2)", "def _evaluate(self, y_true, y_pred):\n pass", "def mse(y_pred, y):\n return np.mean((y - y_pred)**2)", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, 
target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def eval_prediction(df, print_all_wrong=False):\n incorrect_df = df.loc[~df['prediction_correct']].copy()\n print(f'Overall correctness: {(1 - len(incorrect_df) / len(df)) * 100:5.2f} %')\n\n print('\\nCorrectness per category:')\n\n def fm(x):\n series = pd.Series(data=len(x.loc[x['prediction_correct']]) / len(x),\n index=['corr %'])\n\n res = df.groupby('true_label').apply(fm)\n print(res)\n\n print('\\nHighest confidence for wrong predictions per category:')\n res = incorrect_df \\\n .loc[:, ['predicted_label', 'true_label', 'confidence']] \\\n .groupby(['predicted_label', 'true_label']) \\\n .max()\n print(res)\n\n # show all confidence distribution for wrong predictions\n if print_all_wrong:\n print('\\nConfidence distribution for wrong predictions:')\n sorted_df = incorrect_df \\\n .sort_values(by='confidence', ascending=False)\n sorted_df.reset_index(inplace=True, drop=True)\n\n for row in sorted_df.itertuples():\n s = f'{str(row[0]).rjust(4)} '\n s += ', '.join([f'{row[idx]:5.9f}' for idx in range(1, 6)])\n s += f', {row.predicted_label}, {row.true_label}, {row.max_confidence:5.9f}'\n print(s)", "def visualize_test_results(X, y, pred, signnames):\n assert(X.shape[0] == 14)\n nrows = 2\n ncols = 7\n nlabels = 43\n fig, axes = plt.subplots(nrows = 2 * nrows, ncols = ncols, figsize = (10, 10))\n for i in range(nrows):\n for j in range(ncols):\n aximg = axes[2*i, j]\n axprobs = axes[2*i + 1, j]\n idx = i*ncols + j\n\n img = X[idx]\n aximg.imshow(img)\n aximg.set_axis_off()\n\n probs = pred[idx]\n label = y[idx]\n colors = probs.shape[0] * [\"red\"]\n colors[label] = \"green\"\n\n n_top = 5\n topindices = sorted(np.arange(probs.shape[0]), key = lambda i: probs[i])[-n_top:]\n topprobs = probs[topindices]\n topcolors = [colors[i] for i in topindices]\n ypos = np.arange(n_top)\n axprobs.barh(ypos, topprobs, color = topcolors)\n axprobs.set_yticks(ypos)\n for ypos, l in zip(ypos, topindices):\n axprobs.text(0.025, ypos, textwrap.fill(signnames[l], 20), fontsize = 6)\n axprobs.set_axis_off()\n fig.savefig(os.path.join(img_dir, \"test_results.png\"))", "def investigate_data(training_data):\n return sns.pairplot(training_data.sample(100), hue=\"status\")", "def test_y(self):\n g = gca()\n lines = g.get_lines() \n self.assertEqual(lines[0].get_ydata().tolist(), [3, 3, 1, 1, 3])", "def plot_predictions(self):\n\n plt.title(\"Targets vs. 
Predictions\")\n plt.plot(self.T, label=\"Targets\")\n plt.plot(self.Y, label=\"Predictions\")\n plt.xlabel(\"Sample number\")\n plt.legend()\n plt.show()", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def get_ytrue_ypred(model, datagen): \n y_true = np.array([])\n\n for i in range(len(datagen)):\n y_true = np.append(y_true, datagen[i][1])\n \n y_pred = model.predict(datagen)\n \n y_true1 = y_true + 15\n y_pred1 = (y_pred +15).reshape(-1)\n true_pred_df = pd.DataFrame({'y_true':y_true1, 'y_pred':y_pred1})\n true_pred_df['mae'] = np.abs(true_pred_df.y_true - true_pred_df.y_pred)\n \n return y_true1, y_pred1, true_pred_df", "def plot_y_test_means(y_test_means, out_dir, response_name, interactive_run=True):\n plt.rcParams['svg.fonttype'] = 'none'\n x_label = 'Optimization Step'\n y_label = f'Mean {response_name} in Test Set'\n\n plt.plot([_ for _ in range(len(y_test_means))], y_test_means,\n marker='s', markerfacecolor='m', markeredgecolor='black', \n c='m', markersize=0.1,\n markeredgewidth=0.01)\n plt.xticks(fontsize=24)\n plt.yticks(fontsize=24)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n if interactive_run:\n plt.show()\n else:\n if not isdir(out_dir):\n mkdir(out_dir)\n out_fpath = join(out_dir, 'y_test_means-plot.svg')\n print(f'Saving to {out_fpath}')\n plt.savefig(out_fpath)\n plt.clf()", "def test(self, X, y):\n\t\tself.test_X = X\n\t\tself.test_y = y\n\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = classifier.predict(X) \t\t\t# class prediction\n\t\ty_prob = classifier.predict_proba(X)\t# probability of each class\n\t\tself.test_metrics = ModelMetrics(classifier, y, y_pred, y_prob, 'holdout')", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def _plot_good_pred_whit_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][idx_preds] == test.labels[idx] and \\\r\n self.preds[idx][1][idx_preds] != self.preds[idx][1][idx_preds + 1]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if self.preds[idx][0][-1] == test.labels[idx]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(self.preds.shape[1])))\r\n goodclassified_index += new_good_index\r\n reject_idx, misclassified_idx = ([], [])\r\n for idx in range(self.preds.shape[0]):\r\n if idx not in goodclassified_index:\r\n reject = False\r\n for idx_preds in range(self.preds.shape[1] - 1):\r\n if self.preds[idx][1][idx_preds] == self.preds[idx][1][idx_preds + 
1]:\r\n reject_idx.append(idx)\r\n reject = True\r\n break\r\n if not reject:\r\n misclassified_idx.append(idx)\r\n if reject_idx:\r\n ax.scatter(test.features[reject_idx, 0], self.features[reject_idx, 1],\r\n label='Reject', c='orange', marker='^')\r\n if misclassified_idx:\r\n ax.scatter(test.features[misclassified_idx, 0], self.features[misclassified_idx, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print \"\\n\"", "def rmse(y_true, y_pred):\n return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def PlotComparison(result_values, descrete, continuous, jitter=100):\n df = result_values.copy()\n np.random.seed(0)\n df[continuous] = df[continuous] + np.random.randint(low=-jitter, high=jitter, size=len(df))\n base = alt.Chart(df).transform_calculate(\n ymin=\"datum.mean-2*datum.std\",\n ymax=\"datum.mean+2*datum.std\",\n ).properties(\n title = '[Interactive] Accuracy by Params'\n )\n \n points = base.mark_point(\n filled=True,\n size=10\n ).encode(\n x=continuous,\n y=alt.Y('mean:Q'),#, scale=alt.Scale(domain=(0.55, 0.7))),\n color=descrete,\n tooltip=['mean','std']\n )\n\n errorbars = base.mark_errorbar().encode(\n x=continuous,\n y=alt.Y(\"ymin:Q\",title='Accuracy'),\n y2=\"ymax:Q\",\n color=descrete,\n )\n\n return(points + errorbars)", "def plot_regression_results(ax, y_true, y_pred, title, scores, elapsed_time):\n ax.plot([y_true.min(), y_true.max()],\n [y_true.min(), y_true.max()],\n '--r', linewidth=2)\n ax.scatter(y_true, y_pred, alpha=0.2)\n\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n ax.spines['left'].set_position(('outward', 10))\n ax.spines['bottom'].set_position(('outward', 10))\n ax.set_xlim([y_true.min(), y_true.max()])\n ax.set_ylim([y_true.min(), y_true.max()])\n ax.set_xlabel('Measured')\n ax.set_ylabel('Predicted')\n extra = plt.Rectangle((0, 0), 0, 0, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n ax.legend([extra], [scores], loc='upper left')\n title = title + '\\n Evaluation in {:.2f} seconds'.format(elapsed_time)\n ax.set_title(title)", "def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)", "def eval_perf_test(model, X_test, y_test):\n\n y_hat_test = model.predict(X_test)\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean 
Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def plot_preds(\r\n training_data: np.mat,\r\n predictions: np.ndarray,\r\n col_x: np.ndarray,\r\n col_y: np.ndarray,\r\n cola_name: str,\r\n colb_name: str,\r\n) -> plt.plot:\r\n xsort = training_data.copy()\r\n xsort.sort(axis=0)\r\n plt.scatter(col_x, col_y, color=\"blue\")\r\n plt.plot(\r\n xsort[:, 1],\r\n predictions[training_data[:, 1].argsort(0)],\r\n color=\"yellow\",\r\n linewidth=5,\r\n )\r\n plt.title(\"Local Weighted Regression\")\r\n plt.xlabel(cola_name)\r\n plt.ylabel(colb_name)\r\n plt.show()", "def evaluate(self, X_test, y_test):\n \n y_pred = self.pipeline.predict(X_test)\n test_rmse = compute_rmse(y_pred, y_test)\n print(\"test rmse:\", test_rmse)\n return test_rmse", "def y_test_transformed(self):\n return self.test_transformed[self.target_param]", "def deviance_plot(est, X_test, y_test, ax=None, label='',train_color='#2c7bb6', test_color='#d7191c', alpha=1.0):\r\n\ttest_dev = np.empty(n_estimators) #创建数组\r\n\tfor i, pred in enumerate(est.staged_predict(X_test)):\r\n\t\ttest_dev[i] = est.loss_(y_test, pred)\r\n\tif ax is None:\r\n\t\tfig = plt.figure(figsize=(8,5))\r\n\t\tax = plt.gca();\r\n\tax.plot(np.arange(n_estimators)+1, test_dev, color=test_color, label='Test Error max_depth=1 %s' % label, linewidth=2, alpha=alpha)\r\n\tax.plot(np.arange(n_estimators)+1, est.train_score_, color=train_color, label='Train Error max_depth=1 %s' % label, linewidth=2, alpha=alpha)\r\n\tax.set_ylabel('Error')\r\n\tax.set_xlabel('n_estimators')\r\n\tax.set_ylim((0,2))\r\n\treturn test_dev, ax", "def rmse(y_true, y_pred):\n\treturn backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))", "def evaluate_model(model, X_test, Y_test, category_names):\n\n Y_pred = pd.DataFrame(model.predict(X_test))\n Y_pred.columns = category_names\n Y_test = pd.DataFrame(Y_test)\n Y_test.columns = category_names\n\n for column in category_names:\n print('** {} **'.format(column).upper())\n print(classification_report(Y_test[column], Y_pred[column]))", "def evaluate(self, X_test, y_test):\n pipeline = run()\n y_pred = pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n print(rmse)\n return rmse", "def plot_predictions(y, yhat, title=\"Predictions vs Actual\", output_dir=None):\n\n fig = plt.figure(figsize=(15, 6))\n plt.xlabel('Time')\n plt.ylabel('PM10')\n plt.plot(y, label=\"actual\", figure=fig)\n plt.plot(yhat, label=\"predicted\", figure=fig)\n plt.title(title)\n fig.legend()\n\n if output_dir != None:\n plt.savefig(os.path.join(output_dir, \"{}.png\".format(title)))\n\n plt.close(fig)", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def predict(x_train, y_train, x_test, y_test, fn, params):\n y_train_predicted = fn(x_train, None, *params)\n y_train_predicted = (y_train_predicted >= 0.5) * 1\n y_test_predicted = fn(x_test, None, *params)\n y_test_predicted = (y_test_predicted >= 0.5) * 1\n\n train_acc = np.sum(y_train_predicted == y_train) / x_train.shape[0]\n 
test_acc = np.sum(y_test_predicted == y_test) / x_test.shape[0]\n print('train accuracy =', train_acc)\n print('test accuracy =', test_acc)\n scatter_plot(x_train, y_train_predicted, x_test, y_test_predicted, 'predicted 0', 'predicted 1')", "def show_predictions(model, test_set, val_set, image_guess, img_res, data='OSNR', GRAY=True):\n \n ## Uses model to predict some amount of images\n predict = model.predict_classes(test_set, batch_size=5, verbose=1)\n \n ## Initialises variables for loop\n correctly_guessed = 0\n\n ## Defines figure dimensions\n fig = plt.figure(figsize=(20,30))\n\n ## Begins loop to find correct predictions and relay results to user\n ## Searches through the prediction array and compares it to the actual array.\n ## Displays image with the prediction and answer on the title\n for i in range(image_guess):\n correct = False\n actual = np.argmax(val_set[i])\n\n if predict[i] == actual:\n correctly_guessed += 1\n correct = True\n\n plt.subplot(6,3,i+1)\n fig.subplots_adjust(left=0.01,\n right=0.7,\n bottom=0.1,\n top=1.2,\n wspace=0.5,\n hspace=0.2\n )\n if GRAY == False:\n plt.imshow(test_set[i].reshape(img_res,img_res,3))\n else:\n plt.imshow(test_set[i].reshape(img_res,img_res), cmap='gray')\n\n if correct == True:\n if data == 'disp':\n plt.title('Correct! \\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('Correct! \\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('Correct! \\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n \n \n else:\n if data == 'disp':\n plt.title('\\nPrediction = {}ps/nm Truth = {}ps/nm'\n .format((10+10*predict[i]), (10+10*(actual))), fontsize=15)\n \n if data == 'disp-short':\n plt.title('\\nPrediction = {} ~ {}ps/nm Truth = {} ~{}ps/nm'\n .format(100*(predict[i]), (100+100*predict[i]), 100*(actual), (100+100*(actual)), fontsize=15))\n \n if data == 'OSNR':\n plt.title('\\nPrediction = {}dB Truth = {}dB'\n .format((12+0.5*predict[i]), (12+0.5*(actual))), fontsize=15)\n\n ## Returns amount of predictions that were correct\n print('Correctly guessed = ', correctly_guessed)\n print('Inorrectly guessed = ', (image_guess-correctly_guessed))", "def get_score(y_true, y_pred):\n scores = []\n for i in tqdm_notebook(range(len(y_true))):\n score,_ = get_score_summary(y_true[i], y_pred[i])\n scores.append(score)\n return np.array(scores)", "def predictions_relevance(self):\n return [\"Support Vector Regression predictions comparison\", super().truncate_predictions_relevance(self.datasetManager.X_test, self.datasetManager.y_test, self.y_pred)]", "def regression_evaluation(self, test_set, predicted_values):\r\n\r\n MAE = self.mean_absolute_error(test_set, predicted_values)\r\n MSE = self.mean_square_error(test_set, predicted_values)\r\n print(f\"Mean Percent Error:\\t{MAE:.2f}\")\r\n print(f\"Mean Square Error:\\t{MSE:.2f}\")", "def plot_good_pred(self, test: Set, title=None, fig_size=None, reject=False):\r\n if reject:\r\n self._plot_good_pred_whit_reject(test, title, fig_size)\r\n else:\r\n self._plot_good_pred_whitout_reject(test, title, fig_size)", "def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):\n\n scores = pd.DataFrame(columns=[\"Model\", \"MAE\", \"MSE\", \"R2\"])\n\n for modelname, pipeline in pipelines.items():\n pipeline.fit(X_train, 
y_train)\n y_pred = pipeline.predict(X_test)\n mae = mean_absolute_error(y_test, y_pred)\n mse = mean_squared_error(y_test, y_pred)\n r2 = r2_score(y_test, y_pred)\n scores = scores.append(\n {\"Model\": modelname, \"MAE\": mae, \"MSE\": mse, \"R2\": r2}, ignore_index=True\n )\n\n for metric in [\"MAE\", \"MSE\", \"R2\"]:\n ax = sns.barplot(x=\"Model\", y=metric, data=scores)\n ax.set_ylim(bottom=0)\n plt.title(\"Test data: \" + metric)\n plt.show()", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def mse(y, y_pred, verbose=True):\n\n mse_sum = 0\n\n for i in range(len(y)):\n mse_sum += mean_squared_error(y[i], y_pred[i])\n\n if verbose:\n print(f\"Mean MSE {mse_sum / len(y)}\")\n\n return mse_sum / len(y)", "def simple_time_series(full_df, test_period, display_graphs=True):\n df = full_df.copy()\n df = df.filter([\"Canteen\"])\n\n train = df.iloc[:-test_period]\n test = df.iloc[-test_period:]\n\n resulting_prediction, predictions = prediction(train, test)\n\n if display_graphs is True:\n plt.figure(figsize=(14, 7))\n plt.plot(train)\n plt.plot(resulting_prediction)\n plt.legend([\"Real values\", \"Prediction\"], loc=\"best\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Number of people\")\n\n print(\n \"The mean absolute error (MAE) for the Simple Time Series model is {0:.0f} people\".format(\n find_MAE(test, predictions)\n )\n )", "def avg_response(df, x, y_obs, y_est, save=False, show=True):\n\n fig, ax1 = plt.subplots(figsize=(15,15))\n\n ax2 = ax1.twinx()\n\n x_name = x\n if df[x].dtype == \"int\":\n x = df[x].astype(\"category\")\n elif df[x].dtype == \"float\":\n x = pd.cut(df[x], bins=10)\n\n metrics = {\"mean\":\"mean\", \"std err\":\"sem\", \"count\":\"count\"}\n df_grouped = df.groupby([x])[y_obs, y_est].agg(metrics)\n \n x_vals = range(len(df_grouped))\n y_vals = df_grouped[\"mean\"][y_est]\n ax1.errorbar(x_vals, y_vals,yerr=df_grouped[\"std err\"][y_est], fmt='-',\n marker='o',color=\"R\", mec='black', ms=10, mew=2, linewidth=4, \n capsize=10, elinewidth=2)\n\n y_vals = df_grouped[\"mean\"][y_obs]\n ax1.plot(x_vals, y_vals, '-', label=y_obs, marker='o',\n color = \"G\",mec='black', ms=10, mew=2, linewidth=4)\n\n y_vals = df_grouped[\"count\"][y_obs]\n ax2.bar(x_vals,y_vals, color='DarkSlateGray', alpha = 0.25)\n\n ax1.set_xlim(x_vals[0]-0.2,x_vals[-1]+1)\n x_levels = list(y_vals.index)\n plt.xticks(x_vals, x_levels)\n ax1.set_xticklabels(x_levels, rotation=45)\n ax1.grid(False)\n ax2.grid(False)\n font_size = 20\n ax1.set_xlabel(x_name, fontsize=font_size)\n ax1.set_ylabel(y_obs, fontsize=font_size)\n ax2.set_ylabel(\"count\", fontsize=font_size)\n plt.title(\"Average {y} for groups of {x}\".format(x=x_name, y=y_obs), \n fontsize=font_size+5)\n ax1.legend([y_obs, y_est], fontsize=font_size-2)\n if save:\n 
fig.savefig(\"/home/edward/work/repos/prometheus/python/plots/avg_response/{}.png\".\n format(x_name), bbox_inches='tight')\n if show:\n plt.show()", "def load_univariate_series(self, test_col: str, grd_truth_col: str = None, plot_graph=False,\n ):\n random.seed(4)\n raw_data = pd.read_csv(filepath_or_buffer=self.file_path)\n assert test_col in list(raw_data.columns), test_col+\" is not in the columns of the data!\"\n if grd_truth_col is not None:\n assert grd_truth_col in list(raw_data.columns), grd_truth_col+\" is not in the columns of the data!\"\n drop_n = int(1. / self.train_ratio)\n raw_data = raw_data.iloc[::drop_n]\n raw_data.reset_index(inplace=True)\n test_data = raw_data[test_col]\n grd_truth_data = raw_data[grd_truth_col]\n if plot_graph:\n plt.plot(test_data, \".\", label='Test Data')\n plt.plot(grd_truth_data, color='r', label='Ground Truth')\n plt.xlabel('Time (s)')\n plt.ylabel(test_col)\n plt.show()\n # The non-NaN (or part of it) entries of the data will be used as training data; the NaN (missin data) will be\n # inferred from subsequent experiment.\n\n # First step, we simply use a univariate time series, regressing the tide height against time\n Y_grd = grd_truth_data.values\n X_grd = np.array(list(test_data.index))\n Y_train = test_data.dropna()\n X_train = np.array(list(Y_train.index))\n Y_train = Y_train.values\n data_null = test_data.isnull()\n X_test = np.array(test_data[data_null].index)\n\n if grd_truth_col is None:\n Y_test = None\n else:\n Y_test = grd_truth_data.iloc[X_test].values\n\n if self.n_test is not None:\n test_pt = np.minimum(self.n_test, len(Y_test))\n test_idx = np.array(random.sample(range(len(Y_test)), test_pt))\n Y_test = Y_test[test_idx]\n X_test = X_test[test_idx]\n\n # The index of data with missing entries. 
This will be used for prediction\n assert Y_test.shape[0] == X_test.shape[0], \"buggy code.\"\n return X_train.reshape(-1, 1), Y_train.reshape(-1, 1), X_test.reshape(-1, 1), \\\n Y_test.reshape(-1, 1), X_grd.reshape(-1, 1), Y_grd.reshape(-1, 1)", "def plot_true_predicted(train_test_sets, radii_test_RF,\n radii_test_output_error):\n\n X_train, X_test, y_train, y_test = train_test_sets\n plt.figure()\n plt.errorbar(radii_test_RF, y_test.values,\n xerr=radii_test_output_error,\n fmt='.', c='C1', elinewidth=0.5,\n label='Random forest')\n # 1:1 line and labels\n plt.plot(np.sort(y_test.values), np.sort(y_test.values), 'k-', lw=0.25)\n\n plt.ylabel(r'True radius ($R_\\oplus$)')\n plt.ylabel(r'Predicted radius ($R_\\oplus$)')\n plt.legend(loc='lower right')\n return None", "def explained_variance_score(self):\n print('Explained variance score: ' + str(explained_variance_score(self.model.dataset.get_y_test(),\n self.model.get_predicted())))", "def plot_actual_predicted(self):\n predicted = [self.f(x, self.coefficients) for x in self.x_values]\n\n plt.scatter(self.x_values, self.y_values, label = \"Actual data\", c = 'b')\n plt.plot(self.x_values, predicted, label = \"Predicted data\", c = 'r')\n plt.title(f\"Graph of Prediected and Actual data points.\")\n plt.xlabel('x-axis')\n plt.ylabel('y-axis')\n plt.legend()\n plt.show()", "def max_error(y_true, y_pred):\n ...", "def _plot_train_test_experiment(mtrain, mval, metric_name, isState):\n # axes\n f, axes = plt.subplots(2,2,figsize=(12,10))\n ltrain = _plot_experiment(mtrain, axes[:,0], metric_name, isTrain=True)\n lval = _plot_experiment(mval, axes[:,1], metric_name, isTrain=False)\n # title\n target = \"State\" if isState else \"Output\"\n f.suptitle(f\"{target} Errors\")\n f.tight_layout()\n return f, axes", "def _plot_model_pred_vs_obs(self, ax):\n\n res = self._model.fit()\n\n ax.plot(self._model.endog, res.fittedvalues, '.', label='Observation')\n\n x_lim = ax.get_xlim()\n\n ax.plot(x_lim, x_lim, 'k:', label='1:1 line')\n\n x_label = 'Observed ' + self._model.endog_names\n y_label = 'Predicted ' + self._model.endog_names\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n ax.legend(loc='best', numpoints=1)", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)" ]
[ "0.6575989", "0.65215456", "0.6404881", "0.62492937", "0.6163107", "0.61196136", "0.60816854", "0.6080583", "0.6039545", "0.6038659", "0.6025292", "0.60227543", "0.6006382", "0.59930116", "0.5945069", "0.5939615", "0.5928846", "0.5928846", "0.5887865", "0.5881625", "0.58782905", "0.58576834", "0.5856805", "0.5844696", "0.5840252", "0.58336306", "0.5809927", "0.5777172", "0.5773653", "0.57705843", "0.5754428", "0.5748312", "0.57463586", "0.57437575", "0.57281643", "0.57198083", "0.56891197", "0.56854695", "0.56812376", "0.5680667", "0.5675413", "0.5674777", "0.56738174", "0.5673321", "0.56706125", "0.56703776", "0.56518686", "0.56491673", "0.5647603", "0.5637957", "0.56249887", "0.5620775", "0.5601163", "0.55972123", "0.558673", "0.5574077", "0.55622584", "0.5556132", "0.5543083", "0.5541836", "0.5529494", "0.5527861", "0.55207676", "0.55113006", "0.55069464", "0.5506116", "0.5505368", "0.54988825", "0.549719", "0.5493564", "0.5489169", "0.5489112", "0.5485128", "0.54752046", "0.54696107", "0.5469113", "0.54668105", "0.54666394", "0.5464083", "0.54617465", "0.5460017", "0.5459704", "0.54504335", "0.54462504", "0.5433721", "0.5432883", "0.54205495", "0.5415262", "0.54142845", "0.54138565", "0.5412487", "0.54118735", "0.5409796", "0.5409546", "0.540791", "0.540355", "0.5402557", "0.5399778", "0.5397158", "0.5396127" ]
0.6889194
0
Mock out the tkinter canvas and all graphics
def setUp(self): self.screentype_patcher = mock.patch( 'turtle._Screen', new=mock.Mock ) self.mock_screentype = self.screentype_patcher.start() self.screen_patcher = mock.patch('turtle.Turtle._screen') self.mock_screen = self.screen_patcher.start() self.mock_screen.xscale = 1.0 self.mock_screen.yscale = 1.0 self.mock_screen.mode.return_value = 'standard' self.update_patcher = mock.patch( 'aioturtle.aioturtle.AioBaseTurtle._update_graphics' ) self.mock_update = self.update_patcher.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_canvas(self):\n # create frame to contain canvas\n self.world_container = tk.Frame(self,\n width = self.world_size[1],\n height = self.world_size[0])\n self.world_container.grid(row = 1, column = 0, sticky = tk.W+tk.N)\n\n # create canvas\n self.canvas = tk.Canvas(\n self.world_container,\n width = self.world_size[1],\n height = self.world_size[0],\n borderwidth = 1,\n highlightthickness = 0)\n self.canvas.grid(row = 0, column = 0, sticky = tk.W)\n self.canvas.bind('<Button-1>', self.click_cell)", "def main():\n Canvas1Demo().mainloop()", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def __init__(self, width, height, pixelsPerCell = 10, title = \"Ants\"):\n from tkinter import Tk, Canvas, Toplevel\n self.width = width\n self.height = height\n self.color = [\"white\", \"black\", \"red\", \"yellow\", \"blue\", \"green\", \"purple\", \"pink\", \"cyan\", \"turquoise\", \"gray\"]\n self.board = [[0 for x in range(self.width)] for y in range(self.height)]\n self.box = [[0 for x in range(self.width)] for y in range(self.height)]\n self.pixelsPerCell = pixelsPerCell\n self.title = title\n self.app = Tk()\n self.app.withdraw()\n self.win = Toplevel()\n self.win.wm_title(title)\n self.canvas = Canvas(self.win,\n width=(self.width * pixelsPerCell),\n height=(self.height * pixelsPerCell))\n self.canvas.pack(side = 'bottom', expand = \"yes\", anchor = \"n\",\n fill = 'both')\n self.win.winfo_toplevel().protocol('WM_DELETE_WINDOW',self.close)\n #self.canvas.bind(\"<Configure>\", self.changeSize)\n self.draw()", "def __init__(self, canvas):\r\n\r\n # Initialize attributes\r\n self.canvas = canvas\r\n self.fig = canvas.fig\r\n self.units = None\r\n self.cb = None\r\n self.cb_bt = None\r\n self.cb_gga = None\r\n self.cb_vtg = None\r\n self.bt = None\r\n self.gga = None\r\n self.vtg = None\r\n self.hover_connection = None\r\n self.annot = None", "def make_canvas(self, painter, **args):\n\t\treturn None", "def new_canvas(self):\n libtcod.console_clear(self.console)", "def paint(self):\r\n self.canvas.delete(tkinter.ALL)\r\n self.visit(self.tree.root)", "def pysweep_before_finish_init(self):\n self.displaycanvas = DisplayCanvas(self.pysweep.master, self.boardsize, self.lcounterlength, self.rcounterlength, self.images)\n self.displaycanvas.pack()\n\n self.pysweep.master.update_idletasks()\n 
self.displaycanvas.update_idletasks()\n # enode = self.arbitrary()\n # print('DisplayCanvas:', enode)", "def __init__(self):\n Frame.__init__(self)\n self.master.title(\"Canvas Demo\")\n self.grid()\n\n # create a canvas and place in this frame\n self.canvas = Canvas(self, width = 200, height = 100, \n bg = \"white\")\n self.canvas.grid(row = 0, column = 0)\n\n # Place buttons in a frame\n frame = Frame(self)\n frame.grid(row = 1, column = 0)\n rectangle = Button(frame, text = \"Rectangle\", \n command = self.displayRect)\n oval = Button(frame, text = \"Oval\", \n command = self.displayOval)\n arc = Button(frame, text = \"Arc\", \n command = self.displayArc)\n polygon = Button(frame, text = \"Polygon\", \n command = self.displayPolygon)\n line = Button(frame, text = \"Line\", \n command = self.displayLine)\n string = Button(frame, text = \"String\", \n command = self.displayString)\n clear = Button(frame, text = \"Clear\", \n command = self.clearCanvas)\n\n rectangle.grid(row = 0, column = 0)\n oval.grid(row = 0, column = 1)\n arc.grid(row = 0, column = 2)\n polygon.grid(row = 0, column = 3)\n line.grid(row = 0, column = 4)\n string.grid(row = 0, column = 5)\n clear.grid(row = 0, column = 6)", "def simple_canvas(self):\n self.canvas = Canvas()\n\n self.box1 = Box()\n self.canvas.add(self.box1)\n self.box1.matrix.translate(100, 50)\n self.box1.width = 40 \n self.box1.height = 40 \n self.box1.request_update()\n\n self.box2 = Box()\n self.canvas.add(self.box2)\n self.box2.matrix.translate(100, 150)\n self.box2.width = 50 \n self.box2.height = 50 \n self.box2.request_update()\n\n self.line = Line()\n self.head = self.line.handles()[0]\n self.tail = self.line.handles()[-1]\n self.tail.pos = 100, 100\n self.canvas.add(self.line)\n\n self.canvas.update_now()\n self.view = GtkView()\n self.view.canvas = self.canvas\n from gi.repository import Gtk\n win = Gtk.Window()\n win.add(self.view)\n self.view.show()\n self.view.update()\n win.show()\n\n self.tool = ConnectHandleTool(self.view)", "def __init__(self):\r\n Frame.__init__(self)\r\n self.master.title(\"GUIs drawing geometric shapes\")\r\n self.grid()\r\n\r\n #create a canvas and place in this frame\r\n self.canvas = Canvas(self, width = 300, height = 400)\r\n self.canvas.grid(row = 0, column = 0)\r\n\r\n self.canvas.create_rectangle(100, 50, 200, 350)\r\n self.canvas.create_oval(100, 50, 200, 150,\r\n fill = \"white\", tags = \"RED\")\r\n self.canvas.create_oval(100, 150, 200, 250,\r\n fill = \"white\", tags = \"YELLOW\")\r\n self.canvas.create_oval(100, 250, 200, 350,\r\n fill = \"green\", tags = \"GREEN\")\r\n\r\n \r\n dx = 1\r\n while True:\r\n self.canvas.after(2000) # Sleep for 15 milliseconds\r\n self.canvas.update() # Update canvas\r\n if dx == 1:\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"yellow\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"white\")\r\n dx += 1\r\n elif dx == 2:\r\n self.canvas.itemconfigure(\"RED\", fill = \"red\")\r\n self.canvas.itemconfigure(\"YELLOW\", fill = \"white\")\r\n dx += 1 \r\n else:\r\n self.canvas.itemconfigure(\"RED\", fill = \"white\")\r\n self.canvas.itemconfigure(\"GREEN\", fill = \"green\")\r\n dx = 1", "def prepareTestCanvas(self):\r\n loadLayers()\r\n setCanvasCrs(4326, True)\r\n CANVAS.resize(QtCore.QSize(400, 400))\r\n CANVAS.zoomToFullExtent()", "def __init__(self, tkRoot, title):\n # create an instance of the class for ATV Automation\n self.tv = ActionScript()\n self.rc = SonyRCKey()\n self.app = AppList()\n\n # Initialize tkRoot as the Tk() instance\n self.tkRoot = tkRoot\n 
self.tkRoot.title(title) # Change title for each test\n self.tkRoot.iconbitmap(\"img/bot_icon.ico\")\n self.tkRoot.geometry(\"1200x480\")\n\n # Create frame for header\n self.headerFrame = ttk.Frame(self.tkRoot)\n self.headerFrame.pack(fill=X)\n\n # Create canvas Testcase Instructions\n self.sideCanvas = Canvas(self.tkRoot)\n self.sideCanvas.pack(fill=BOTH, side=LEFT)\n\n # Create Frame for Testcase Instructions\n self.sideFrame = ttk.Frame(self.sideCanvas)\n self.sideFrame.pack(fill=BOTH, side=LEFT)\n\n # Create canvas for Testcase running\n self.testCanvas = Canvas(self.tkRoot)\n self.testCanvas.pack(fill=BOTH, side=LEFT, expand=True)\n\n # add scrollbar inside testcase canvas\n self.scrollbar = Scrollbar(self.tkRoot, command=self.testCanvas.yview)\n self.scrollbar.pack(fill=Y, side=RIGHT, expand=False)\n\n # Create frame for Testcase running\n self.testFrame = ttk.Frame(self.testCanvas)\n self.testFrame.pack(fill=BOTH, side=LEFT, expand=True)\n\n # configure canvas and scrollbar\n self.testCanvas.configure(yscrollcommand=self.scrollbar.set)\n\n # put sideframe in sidecanvas\n self.sideCanvas.create_window(\n (0, 0), window=self.sideFrame, anchor='nw', width=400)\n\n # put testFrame in testCanvas\n self.testCanvas.create_window(\n (0, 0), window=self.testFrame, anchor='nw', width=800)\n\n # Create a custom font\n self.mainFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.NORMAL)\n self.sideFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.NORMAL)\n self.buttonFont = tkFont.Font(\n family=\"Helvetica\", size=10, weight=tkFont.BOLD)\n self.boldFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.BOLD)\n\n # Initialize flags for background of the labels and loop count\n self.bgCounter = 0\n self.loopCount = IntVar()\n self.loopCount.set(1)\n self.loopCounterUI = IntVar() # loop counter UI\n self.loopCounterUI.set(0)\n self.deviceID = StringVar()\n self.stopLoop = False\n self.countLoopReset = 0\n\n # Initialize button so we can access it on any functions\n self.btnStart = Button()\n self.btnStop = Button()\n self.txtLoop = Entry()\n self.labelLoop = Label()\n self.txtDeviceID = Entry()\n self.labelDeviceID = Label()\n self.LabelLists = []\n self.tsFormat = '%Y-%m-%d, %I:%M:%S %p'\n self.playback_time = 0.3", "def canvas_api():\n pass", "def DrawTest(self,event=None):\n\n wx.GetApp().Yield(True)\n\n Range = (-10,10)\n colors = self.colors\n\n self.BindAllMouseEvents()\n Canvas = self.Canvas\n\n Canvas.InitAll()\n #\n ## these set the limits for how much you can zoom in and out\n Canvas.MinScale = 14\n Canvas.MaxScale = 500\n\n\n ############# Random tests of everything ##############\n\n # Rectangles\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5))\n Canvas.AddRectangle(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Ellipses\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n h = random.randint(1,5)\n w = random.randint(1,5)\n Canvas.AddEllipse(xy, (h,w), LineWidth = lw,FillColor = colors[cf])\n\n # Points\n for i in range(5):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n D = random.randint(1,50)\n cf = random.randint(0,len(colors)-1)\n Canvas.AddPoint(xy, Color = colors[cf], Diameter = D)\n\n # SquarePoints\n for i in 
range(500):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n S = random.randint(1, 50)\n cf = random.randint(0,len(colors)-1)\n Canvas.AddSquarePoint(xy, Color = colors[cf], Size = S)\n\n # Circles\n for i in range(5):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n D = random.randint(1,5)\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddCircle(xy, D, LineWidth = lw, LineColor = colors[cl], FillColor = colors[cf])\n Canvas.AddText(\"Circle # %i\"%(i), xy, Size = 12, BackgroundColor = None, Position = \"cc\")\n # Lines\n for i in range(5):\n points = []\n for j in range(random.randint(2,10)):\n point = (random.randint(Range[0],Range[1]),random.randint(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,10)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddLine(points, LineWidth = lw, LineColor = colors[cl])\n # Polygons\n for i in range(3):\n points = []\n for j in range(random.randint(2,6)):\n point = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,6)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddPolygon(points,\n LineWidth = lw,\n LineColor = colors[cl],\n FillColor = colors[cf],\n FillStyle = 'Solid')\n\n ## Pointset\n for i in range(4):\n points = []\n points = RandomArray.uniform(Range[0],Range[1],(100,2))\n cf = random.randint(0,len(colors)-1)\n D = random.randint(1,4)\n Canvas.AddPointSet(points, Color = colors[cf], Diameter = D)\n\n # Text\n String = \"Unscaled text\"\n for i in range(3):\n ts = random.randint(10,40)\n cf = random.randint(0,len(colors)-1)\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n Canvas.AddText(String, xy, Size = ts, Color = colors[cf], Position = \"cc\")\n\n # Scaled Text\n String = \"Scaled text\"\n for i in range(3):\n ts = random.random()*3 + 0.2\n cf = random.randint(0,len(colors)-1)\n Point = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n Canvas.AddScaledText(String, Point, Size = ts, Color = colors[cf], Position = \"cc\")\n\n # Arrows\n N = 5\n Points = RandomArray.uniform(Range[0], Range[1], (N,2) )\n for i in range(N):\n Canvas.AddArrow(Points[i],\n random.uniform(20,100),\n Direction = random.uniform(0,360),\n LineWidth = random.uniform(1,5),\n LineColor = colors[random.randint(0,len(colors)-1)],\n ArrowHeadAngle = random.uniform(20,90))\n\n # ArrowLines\n for i in range(5):\n points = []\n for j in range(random.randint(2,10)):\n point = (random.randint(Range[0],Range[1]),random.randint(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,10)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddArrowLine(points, LineWidth = lw, LineColor = colors[cl], ArrowHeadSize= 16)\n\n\n Canvas.ZoomToBB()", "def _hijack_tk(self):\n import Tkinter\n orig_mainloop = gtk.main\n dumb_ml = _DummyMainloop(orig_mainloop, self, GUI_TK)\n Tkinter.Misc.mainloop = dumb_ml\n Tkinter.mainloop = dumb_ml", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def create_board_canvas(master: Widget) -> None:\r\n\r\n self.canvas = Canvas(master, bg='black')\r\n self.canvas.bind('<Configure>', self.on_canvas_resize)\r\n 
self.canvas.bind(\"<B1-Motion>\", self.on_canvas_click)\r\n self.canvas.bind(\"<Button-1>\", self.on_canvas_click)\r\n self.canvas.bind(\"<ButtonRelease-1>\", self.on_canvas_mouse_release)\r\n self.canvas.pack(fill=BOTH, expand = TRUE)", "def setUpGUI(self):\n WHITE = '#ffffff'\n # Set up the GUI so that we can paint the fractal image on the screen\n canvas = Canvas(self.window, width=self.width, height=self.height, bg=WHITE)\n canvas.pack()\n canvas.create_image((self.width/2, self.height/2), image=self.img, state=\"normal\")", "def refresh_canvas(self):\r\n self._canvas.delete(tk.ALL)\r\n self._canvas.create_text(10, 10, anchor=tk.NW, fill='black', font=self.text_font,\r\n text=f'Player Hand Total: {self.player_hand.total}')\r\n self._canvas.create_text(10, 150, anchor=tk.NW, font=self.text_font, fill='black',\r\n text=f'Dealer Hand Total: {self.dealer_hand.total}')\r\n self._canvas.create_text(100, 300, anchor=tk.NW, fill=self.status_color, font=self.text_font,\r\n text=f'Game Status: {self.game_status}')\r\n self._canvas.create_text(10, 330, anchor=tk.NW, fill='black', font=self.text_font,\r\n text=f'Dealer Wins: {self.dealer_wins}')\r\n self._canvas.create_text(10, 355, anchor=tk.NW, fill='black', font=self.text_font,\r\n text=f'Player Wins: {self.player_wins}')\r\n self.player_hand.draw(self._canvas, 10, 35)\r\n self.dealer_hand.draw(self._canvas, 10, 175)", "def makeCanvas(self, master, size, side=LEFT, img=None, full=False,\n bg=\"#363636\", text=None, font=(\"TimesNewRoman\", 13, \"bold\")):\n print \"myCanvas.makeCanvas be called\"\n print \"\\tmyCanvas.makeCanvas: check the parameter...\"\n if img:\n if repr(img)[: 32] != \"<PIL.ImageTk.PhotoImage instance\":\n print \"Img not the ImageTk instance\"\n raise \"Widgets.myCanvas.makeCanvas:parameterError\"\n if full and not img:\n print \"Full Canvas should need at least one img\"\n raise \"Widgets.myCanvas.makeCanvas:parameterError\"\n if (repr(master)[: 20] != \"<Tkinter.Tk instance\" \n and repr(master)[: 23] != \"<Tkinter.Frame instance\"):\n print \"master not the Tkinter.Tk or Frame instance\"\n print \"now the master is\", repr(master)\n raise \"Widgets.myCanvas.makeCanvas:parameterError:\"\n if not full and not size:\n print \"you should input a size like 2-list\"\n raise \"Widgets.myCanvas.makeCanvas:parameterError:\"\n print \"\\tparameter is ok\"\n canvas = Canvas(master)\n if img:\n new_img = self.updateImagesize(pic=img, size=size)\n for key in self.CanvasStatus['Draw_options'][\"images\"]:\n if self.CanvasStatus['Draw_options'][\"images\"][key] == img:\n self.CanvasStatus['Draw_options'][\"images\"][key] = new_img\n img = new_img\n x_blank = (size[0] - new_img.width()) / 2\n y_blank = (size[1] - new_img.height()) / 2\n canvas.create_image(x_blank, y_blank, image=img, anchor=NW)\n self.CanvasStatus[\"cavs_imgAlbum\"].update({str(canvas): img})\n print \"\\timg:%s be loaded\" % str(canvas)\n else:\n x_blank = int(size[0] / 2)\n y_blank = int(size[1] / 2)\n text_id = canvas.create_text(x_blank, y_blank, text=u'\\u7f3a\\u7701',\n font=(\"TimesNewRoman\", 20, \"bold\"),\n anchor=CENTER, fill=\"white\")\n if text and self.whether_text:\n x_blank = 5\n y_blank = 0\n if full: y_blank = 5\n font=(\"TimesNewRoman\", 13, \"bold\")\n canvas.create_text(x_blank, y_blank, fill=\"red\",\n text=text, font=font, anchor=NW)\n try:\n print \"\\ttext:%s be loaded\" % text\n except:\n pass\n canvas.bind('<Button-3>', self.funcs['Image2Clipboard'])\n if not full:\n canvas.config(width=size[0], height=size[1], bg=bg)\n if img:\n 
canvas.bind(\"<Double-Button-1>\", self.fullCanvas)\n else:\n canvas.config(width=size[0], height=size[1], bd=0)\n self.thefullCanvas = canvas\n canvas.bind(\"<Double-Button-1>\", self.cellCanvas)\n canvas.pack(side=side, fill=BOTH)\n print '\\tcanvas:', canvas, 'pack ok!'\n return canvas", "def __createWidgets(self):\n # Widget canvas, used to draw rubik's cube\n self.cv = Canvas(self.master)\n self.cv['bg'] = 'white' # Background color\n self.cv['height'] = '440' # Height of canvas\n self.cv['width'] = '560' # Width of canvas\n self.cv.place(x=0, y=0)\n self.__drawCube()", "def main():\n top = Tk()\n dim = 400\n cnv = Canvas(top, width=dim, height=dim)\n cnv.pack()\n lines = []\n for _ in range(10):\n xrd = randint(6, dim-6)\n yrd = randint(6, dim-6)\n xrd2 = randint(6, dim-6)\n yrd2 = randint(6, dim-6)\n lines.append(ShowLine(cnv, Point(xrd,yrd), Point(xrd2, yrd2)))\n for line in lines:\n line.draw()\n top.mainloop()", "def init_picker(self):\n self.current_tile = MapRenderer(self.config, parent=self.button_frame, tileset=Tileset(id=self.map.map.tileset.id))\n self.current_tile.map.blockdata = [self.paint_tile]\n self.current_tile.map.width = 1\n self.current_tile.map.height = 1\n self.current_tile.init_canvas()\n self.current_tile.draw()\n self.current_tile.canvas.grid(row=0, column=4, padx=4)\n\n if hasattr(self, 'picker'):\n self.picker.kill_canvas()\n self.picker = MapRenderer(self.config, parent=self, tileset=Tileset(id=self.map.map.tileset.id))\n self.picker.map.blockdata = range(len(self.picker.map.tileset.blocks))\n self.picker.map.width = 4\n self.picker.map.height = len(self.picker.map.blockdata) / self.picker.map.width\n self.picker.init_canvas(self.picker_frame)\n\n if hasattr(self.picker_frame, 'vbar'):\n self.picker_frame.vbar.destroy()\n self.picker_frame.vbar = Scrollbar(self.picker_frame, orient=VERTICAL)\n self.picker_frame.vbar.pack(side=RIGHT, fill=Y)\n self.picker_frame.vbar.config(command=self.picker.canvas.yview)\n\n self.picker.canvas.config(scrollregion=(0,0,self.picker.canvas_width, self.picker.canvas_height))\n self.map_frame.update()\n\n # overwriting a property is probably a bad idea\n self.picker.canvas_height = self.map_frame.winfo_height()\n\n self.picker.canvas.config(yscrollcommand=self.picker_frame.vbar.set)\n self.picker.canvas.pack(side=LEFT, expand=True)\n\n self.picker.canvas.bind('<4>', lambda event : self.scroll_picker(event))\n self.picker.canvas.bind('<5>', lambda event : self.scroll_picker(event))\n self.picker_frame.vbar.bind('<4>', lambda event : self.scroll_picker(event))\n self.picker_frame.vbar.bind('<5>', lambda event : self.scroll_picker(event))\n\n self.picker.draw()\n self.picker.canvas.bind('<Button-1>', self.pick_block)", "def makeWidgets(self):\n # globals\n global CARD_SIZE, card_images, card_back, card_sheet, pil_card_cropped, curr_card_image, xloc, d_yloc\n \n canvas.configure(background='green4') \n canvas.pack()\n # add buttons to the frame\n tk.Button(root, text='Deal', command=self.deal).pack(side=\"left\")\n tk.Button(root, text='Hit', command=self.hit).pack(side=\"left\")\n tk.Button(root, text='Stay', command=self.stay).pack(side=\"left\")\n # add label for dealer's hand\n canvas_label_d = canvas.create_text(30, (d_yloc - CARD_SIZE[1]/2), anchor=\"sw\")\n canvas.itemconfig(canvas_label_d, text=\"Dealer's hand: \")\n # add label for player's hand\n canvas_label_p = canvas.create_text(30, (p_yloc - CARD_SIZE[1]/2), anchor=\"sw\")\n canvas.itemconfig(canvas_label_p, text=\"Player's hand: \")\n # add label which updates 
outcome\n tk.Label(root, textvariable=self.outcome, font=('Helvetica',12), fg='white', bg='black').pack(side=\"left\")\n # add label for updating score\n canvas_label_score = canvas.create_text(CANVAS_WIDTH - 50, 30, anchor=\"sw\")\n canvas.itemconfig(canvas_label_score, text=self.score.get())", "def setup(self):\n\n # push the frame for the toplevel window\n self.lumpy.pushfr(self.tl)\n self.lumpy.col([0,1])\n\n # the frame at the top contains buttons\n self.lumpy.row([0,0,1], bg='white')\n self.lumpy.bu(text='Close', command=self.close)\n self.lumpy.bu(text='Print to file:', command=self.printfile)\n self.en = self.lumpy.en(width=10, text='lumpy.ps')\n self.en.bind('<Return>', self.printfile)\n self.la = self.lumpy.la(width=40)\n self.lumpy.endrow()\n\n # the grid contains the canvas and scrollbars\n self.lumpy.gr(2)\n \n self.ca_width = 1000\n self.ca_height = 500\n self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')\n\n yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)\n xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,\n sticky=E+W)\n self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,\n scrollregion=(0, 0, 800, 800))\n \n self.lumpy.endgr()\n self.lumpy.endcol()\n self.lumpy.popfr()\n\n # measure some sample letters to get the text height\n # and set the scale factor for the canvas accordingly\n self.canvas.clear_transforms()\n bbox = self.canvas.measure(['bdfhklgjpqy'])\n self.unit = 1.0 * bbox.height()\n transform = ScaleTransform([self.unit, self.unit])\n self.canvas.add_transform(transform)", "def __init__(self, master):\n self.window = tk.Canvas(master, width=500, height=300)\n self.reset_button = tk.Button(master, text=\"Reset\", command=self.reset_window)\n self.start_button = tk.Button(master, text=\"Start\", command=self.start_sorting)\n self.window.pack()\n self.reset_button.pack()\n self.start_button.pack()\n self.reset_window()", "def __init__(self, container, app):\n\n super(PlotCanvas, self).__init__()\n\n self.app = app\n\n # Options\n self.x_margin = 15 # pixels\n self.y_margin = 25 # Pixels\n\n # Parent container\n self.container = container\n\n # Plots go onto a single matplotlib.figure\n self.figure = Figure(dpi=50) # TODO: dpi needed?\n self.figure.patch.set_visible(False)\n\n # These axes show the ticks and grid. No plotting done here.\n # New axes must have a label, otherwise mpl returns an existing one.\n self.axes = self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label=\"base\", alpha=0.0)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # The canvas is the top level container (FigureCanvasQTAgg)\n self.canvas = FigureCanvas(self.figure)\n # self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)\n # self.canvas.setFocus()\n\n #self.canvas.set_hexpand(1)\n #self.canvas.set_vexpand(1)\n #self.canvas.set_can_focus(True) # For key press\n\n # Attach to parent\n #self.container.attach(self.canvas, 0, 0, 600, 400) # TODO: Height and width are num. 
columns??\n self.container.addWidget(self.canvas) # Qt\n\n # Copy a bitmap of the canvas for quick animation.\n # Update every time the canvas is re-drawn.\n self.background = self.canvas.copy_from_bbox(self.axes.bbox)\n\n ### Bitmap Cache\n self.cache = CanvasCache(self, self.app)\n self.cache_thread = QtCore.QThread()\n self.cache.moveToThread(self.cache_thread)\n super(PlotCanvas, self).connect(self.cache_thread, QtCore.SIGNAL(\"started()\"), self.cache.run)\n # self.connect()\n self.cache_thread.start()\n self.cache.new_screen.connect(self.on_new_screen)\n\n # Events\n self.canvas.mpl_connect('button_press_event', self.on_mouse_press)\n self.canvas.mpl_connect('button_release_event', self.on_mouse_release)\n self.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)\n #self.canvas.connect('configure-event', self.auto_adjust_axes)\n self.canvas.mpl_connect('resize_event', self.auto_adjust_axes)\n #self.canvas.add_events(Gdk.EventMask.SMOOTH_SCROLL_MASK)\n #self.canvas.connect(\"scroll-event\", self.on_scroll)\n self.canvas.mpl_connect('scroll_event', self.on_scroll)\n self.canvas.mpl_connect('key_press_event', self.on_key_down)\n self.canvas.mpl_connect('key_release_event', self.on_key_up)\n self.canvas.mpl_connect('draw_event', self.on_draw)\n\n self.mouse = [0, 0]\n self.key = None\n\n self.pan_axes = []\n self.panning = False", "def setup_draw(self):\n pass", "def __init__(self, target=None, height=0, width=0):\n\t\ttkinter.Canvas.__init__(self, target, height=height, width=width)\n\t\tself.Track_Record = Track.Track_Record()\n\t\tself.draw_points() #draw points on canvas\n\t\tself.draw_canvas() #draw grids on canvas", "def clear_canvas():\r\n global _canvas\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n _canvas.clear()", "def StartDraw(self):\r\n self.zoom = self.test.viewZoom\r\n self.center = self.test.viewCenter\r\n self.offset = self.test.viewOffset\r\n self.screenSize = self.test.screenSize", "def show(self):\n self.driver.send(self.canvas)", "def setUI(self):\n self.parent.title(\"Handwritten digits classification\")\n self.pack(fill=BOTH, expand=1)\n self.columnconfigure(6,weight=1)\n self.rowconfigure(2, weight=1)\n self.canv = Canvas(self, bg=\"white\")\n self.canv.grid(row=2, column=0, columnspan=7,\n padx=5, pady=5,\n sticky=E + W + S + N)\n self.canv.bind(\"<B1-Motion>\",\n self.draw)\n\t\t\t\n\t\t\t\n #size_lab = Label(self, text=\"Classificator: \")\n #size_lab.grid(row=0, column=0, padx=5)\n predict_btn = Button(self, text=\"Predict\", width=10, command=lambda: self.predict())\n predict_btn.grid(row=0, column=0)\n delete_btn = Button(self, text=\"Clear\", width=10, command=lambda: self.canv.delete(\"all\"))\n delete_btn.grid(row=1, column=0, sticky=W)", "def test_circle_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1),\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle3.png')", "def open_canvas(width, height):\r\n global _canvas\r\n if _canvas != None:\r\n raise RuntimeError(\"Canvas is already open.\")\r\n _canvas = Canvas(width, height)", "def 
clear_canvas():\n self.parent_class.canvas.delete(\"all\")", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def __createCanvas(self):\r\n # create a canvas and pass a figure to it\r\n self.figure = plt.figure()\r\n self.canvas = FigureCanvas(self.figure)\r\n\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(1, 1, 1) # 1X1 grid, 1st subplot\r\n self.canvas.axes.set_title(\"Plot\")\r\n\r\n # create Navigation widget and pass a Canvas widget and the parent\r\n self.toolbar = NavigationToolbar(self.canvas, self)", "def init_canvas_frame(self, max_width=4000, max_height=4000):\n self.frames[\"canvas\"] = Frame(\n master=self.window, width=400, height=400)\n self.canvas = Canvas(\n master=self.frames[\"canvas\"],\n scrollregion=(0, 0, max_width, max_height),\n bg=\"white\")\n h_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=HORIZONTAL)\n h_scrl_bar.pack(side=BOTTOM, fill=X)\n h_scrl_bar.config(command=self.canvas.xview)\n v_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=VERTICAL)\n v_scrl_bar.pack(side=RIGHT, fill=Y)\n v_scrl_bar.config(command=self.canvas.yview)\n self.canvas.config(\n xscrollcommand=h_scrl_bar.set,\n yscrollcommand=v_scrl_bar.set)\n self.canvas.pack(side=LEFT, expand=True, fill=BOTH)\n self.frames[\"canvas\"].pack(\n anchor=\"nw\", side=LEFT, expand=True, fill=BOTH)\n\n self.canvas.bind(\"<ButtonPress-1>\", self.move_start)\n self.canvas.bind(\"<B1-Motion>\", self.move_move)\n self.canvas.bind(\"<Button-4>\", self.linux_zoomer_plus)\n self.canvas.bind(\"<Button-5>\", self.linux_zoomer_minus)\n # windows scroll\n self.canvas.bind(\"<MouseWheel>\", self.windows_zoomer)", "def set_canvas(self):\n self.ui.figure = plt.figure(figsize=(10, 10))\n self.ui.figure.patch.set_facecolor('None')\n self.ui.canvas = FigureCanvas(self.ui.figure)\n self.ui.canvas.setStyleSheet('background-color:transparent;')\n # Matplotlib toolbar\n self.ui.toolbar = NavigationToolbar(self.ui.canvas, self)\n self.ui.toolbar.setMaximumHeight(30)\n self.ui.figureLayout.addWidget(self.ui.toolbar)\n self.ui.figureLayout.addWidget(self.ui.canvas)\n self.ui.canvas.mpl_connect('button_press_event', self.onclick)\n self.ui.canvas.mpl_connect('pick_event', self.onclick_pick)", "def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def init_window(self, game, width, height, scale):\n self.controller = game\n self.window.geometry(\"{0}x{1}\".format((width * scale)+5, (height * scale)+5))\n self.window.resizable(False, False)\n\n self.canvas = tk.Canvas(self.window, width=width * scale, height=height * scale)\n self.canvas.grid(row=0, column=0, sticky=\"nesw\")\n\n self.draw_grid(width, height, scale)\n\n self.window.bind(\"<Button-1>\", lambda a: game.toggle_onclick(a))\n 
self.window.bind(\"<B1-Motion>\", lambda a: game.toggle_onclick(a))\n self.window.bind(\"<space>\", lambda a: game.toggle_pause())\n self.window.bind(\"<Return>\", lambda a: game.do_step())\n self.window.bind(\"<BackSpace>\", lambda a: game.reset())\n self.set_menu()", "def paint(self):\n self.paint_snake()\n self.paint_apple()\n root.mainloop()", "def draw_glycan_in_canvas(self, canvas, tree, root, names, h = 100., w = 100.):\n fig = mpl.figure.Figure(figsize=(h/self.dpi, w/self.dpi))\n ax = fig.add_subplot(111)\n \n self.myDrawer.draw_tree(tree, root, names, root_pos = [0, 0], direction = 1, ax = ax, axis = 0)\n ax.axis('equal')\n ax.axis('off')\n ax.set_ylim((-1, 6))\n ax.set_xlim((-3, 3))\n\n # Add to tk window\n figure_canvas_agg = FigureCanvasAgg(fig)\n figure_canvas_agg.draw()\n figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds\n figure_w, figure_h = int(figure_w), int(figure_h)\n glycan_image = tk.PhotoImage(master = canvas, width=figure_w, height=figure_h)\n canvas.create_image(figure_w/2, figure_h/2, image = glycan_image)\n tkagg.blit(glycan_image, figure_canvas_agg.get_renderer()._renderer, colormode=2)\n return glycan_image", "def __init__(self, frame, width, height):\n \n self.canvas = Tkinter.Canvas(frame, width = int(width), \n height = int(height))\n self.canvas.pack(side = CANVAS[\"POSITION\"])\n self.canvas.configure(background = check_color(CANVAS[\"BACKGROUND_COLOR\"]))", "def __init__(self):\n self.root = Tk()\n self.root.title(\"Brick Breaker\")\n self.root.geometry(\"800x600\")\n self.root.maxsize(800, 600)\n self.root.minsize(800, 600)\n self.root.iconbitmap(\"data/wall.ico\")\n self.root.config(background=\"#000000\")\n self.score = 0\n self.life = 3\n self.canevas = Canvas(self.root, bg='light blue', highlightthickness=0)\n self.paddle = Paddle(self)\n self.ball = Ball(self)\n self.brick = Brick(self)\n self.create_score()\n self.window = Window\n self.end = False\n self.canevas.pack(fill=BOTH, expand=YES)", "def draw_twitter_canvas(self):\n\n self.TWITTER_WIDTH = 100 \n \n self.twitter_canvas= Canvas(self, width=self.TWITTER_WIDTH)\n self.twitter_scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.twitter_canvas.yview) \n self.twitter_canvas.configure(yscrollcommand=self.twitter_scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.twitter_scrollbar.pack(side=RIGHT, fill=Y)\n self.twitter_canvas.pack(side=RIGHT, fill=Y)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.twitter_frame = Frame(self.twitter_canvas)\n self.twitter_canvas.create_window(0,0,window=self.twitter_frame,\n anchor='nw', width=self.TWITTER_WIDTH)\n\n \n # -- adding the twitter logo\n twitter_logo = PhotoImage(file=self.directory + '/images/twitter_icon.png')\n twitter_logo = twitter_logo.subsample(30, 30) \n\n # -- adding image to label\n twitter_label = ttk.Label(self.twitter_frame, image = twitter_logo)\n\n twitter_label.image = twitter_logo \n\n twitter_label.pack(side=TOP, pady=10)", "def display(self, canvas, x, y, width, height):\n # Do we need this?\n pass", "def _draw(self, canvas, options):\n pass # must override in subclass", "def _drawOnCanvas(self):\n self.canvas=np.ones(self.canvas.shape,dtype=np.uint8)*255\n for key in self.elements:\n graphElement=self.elements[key]\n graphElement.draw(self.canvas)\n self.sync=True", "def refresh_canvas(self):\n self.canvas.delete('all')\n self.draw_handler(self)\n self.canvas.after(CANVAS[\"REFRESH_TIME\"], self.refresh_canvas)", "def 
init_clear_canvas_button(self):\n def clear_canvas():\n \"\"\" Function to clear the canvas\"\"\"\n self.parent_class.canvas.delete(\"all\")\n\n self.buttons[\"btn_clear_canvas\"] = Button(\n self.frame, width=14, text=\"Clear\", command=clear_canvas)\n self.buttons[\"btn_clear_canvas\"].grid(row=3, column=1)", "def start(rows,cols):\n global delay\n delay = 1 #msec\n global letterIndex\n letterIndex = 0\n \n shapes = list('LTIZ') * 3000\n import random\n random.shuffle(shapes)\n\n root_window = Tk()\n pack_sim = PackingSimulation(rows, cols, shapes, root_window)\n\n # pack_sim.draw_square(0,0,'blue') #comment these out after you test\n # pack_sim.draw_square(2,2,'blue')\n\n #example test draw_shape; comment out before testing run\n #pack_sim.draw_shape(Shape('L', ((False, False, True),(True, True, True)), 'orange'))\n\n #test run\n grid = Grid(rows, cols, [])\n pack_sim.master.after(100, pack_sim.run, grid) \n root_window.mainloop()", "def __init__(self, master, async = 1, kw = {}, **opts):\n if not opts.has_key('bg'): opts['bg'] =\"white\"\n if not opts.has_key('highlightthickness'): opts['highlightthickness'] = 0\n ScrolledCanvas.__init__(self, master, kw, **opts)\n self.plusicon = self.minusicon = None\n self.nodeheight = 20\n self.sizetree = 0\n self.node = None\n self.first = None\n self.y = 0\n self.selection = []\n self.displayed = []\n self.async = async\n self.cancel_draw = None\n self[\"width\"] = 280\n self[\"height\"] = 200\n \n # There must be a better way to register Resize Event...\n self.bind(\"<Configure>\", self._resized)", "def __init__( self, width = 128, height = 128, *args, **kwargs ):\n ### ZIH - works with old Tk, not with ttk\n # part of this might be because ttk doesn't use fg/bg?\n #ttk.Frame.__init__( self, *args, **kwargs )\n tk.Tk.__init__( self, *args, **kwargs )\n self.config( padx = 0, pady = 0 )\n self.canvas = tk.Canvas(\n self,\n width = width,\n height = height,\n bg = 'black'\n )\n self.canvas.pack( ipadx = 0, ipady = 0, padx = 0, pady = 0 )\n self.raster = tk.PhotoImage( width = width, height = height )\n self.canvas.create_image(\n ### ZIH - why do i need this 2 pixel offset to position the\n # image? 
there also seems to be a superfluous 2 pixel padding\n # around the canvas\n #( ( ( width >> 1 ) + 2 ), ( ( height >> 1 ) + 2 ) ),\n ( 2, 2 ),\n anchor = tk.NW,\n image = self.raster,\n #state = 'normal' ### see if tk.NORMAL works\n state = tk.NORMAL\n )", "def display(self, canvas, x, y, width, height):\n pass", "def show(self):\n root = tkinter.Tk()\n root.title(self.name + ' in the Maze')\n canvas = tkinter.Canvas(root, background='light green',\n width=self.unit_size * self.maze_size,\n height=self.unit_size * self.maze_size)\n canvas.grid()\n\n # draw a representation of the robot in the maze\n if self.battery:\n upper_x = self.column * self.unit_size + self.unit_size / 4\n upper_y = self.row * self.unit_size\n lower_x = upper_x + self.unit_size / 2\n lower_y = upper_y + self.unit_size\n eye_x = lower_x - 3 * self.unit_size / 20\n eye_y = upper_y + self.unit_size / 10\n\n else: # the robot ran out of battery\n upper_x = self.column * self.unit_size\n upper_y = self.row * self.unit_size + self.unit_size / 2\n lower_x = upper_x + self.unit_size\n lower_y = upper_y + self.unit_size / 2\n eye_x = lower_x - 9 * self.unit_size / 10\n eye_y = lower_y - 3 * self.unit_size / 20\n\n rectangle = canvas.create_rectangle(upper_x,\n upper_y,\n lower_x,\n lower_y,\n fill=self.color)\n # draw the robot's eyes\n canvas.create_oval(upper_x + self.unit_size / 10,\n upper_y + self.unit_size / 10,\n upper_x + 3 * self.unit_size / 20,\n upper_y + 3 * self.unit_size / 20,\n fill='black')\n canvas.create_oval(eye_x,\n eye_y,\n eye_x + self.unit_size / 20,\n eye_y + self.unit_size / 20,\n fill='black')\n # draw the obstacles in the maze\n for row in range(self.maze_size):\n for col in range(self.maze_size):\n if not self.maze[row][col]:\n canvas.create_rectangle(col * self.unit_size,\n row * self.unit_size,\n (col + 1) * self.unit_size,\n (row + 1) * self.unit_size,\n fill='red')\n for row in range(self.maze_size):\n canvas.create_line(0,\n row * self.unit_size,\n self.maze_size * self.unit_size,\n row * self.unit_size)\n for col in range(self.maze_size):\n canvas.create_line(col * self.unit_size,\n 0,\n col * self.unit_size,\n self.maze_size * self.unit_size)\n root.mainloop()", "def __init__(self, master):\n\t\tFrame.__init__(self,master)\n\t\t\"\"\"Set the Window Title\"\"\"\n\t\tself.master.title(\"RXF Data Fit\")\n\t\tself.configure(height=200,width=200)\n\t\t\"\"\"Display the main window with a little bit of padding\"\"\"\n\t\tself.grid(padx=15, pady=15,sticky=N+S+E+W) \n\t\t#Create the Menu base\n\t\tself.menu = Menu(self)\n\t\t#Add the Menu\n\t\tself.master.config(menu=self.menu)\n\t\tself.menu.add_command(label=\"Open\", command=self.fileOpen)\n\t\tself.menu.add_command(label=\"Help\", command=self.Simple)\n\t\tself.menu.add_command(label=\"Quit\", command=self.exitProgram)\n\t\tself.pack()\n\t\tf = Figure(figsize=(5,4), dpi=100)\n\t\tcanvas=FigureCanvasTkAgg(f,master=root)\n\t\tcanvas.show()\n\t\tcanvas.get_tk_widget().pack(side=\"top\", fill=\"both\", expand=1)\n\t\ttoolbar = NavigationToolbar2TkAgg( canvas, root )\n\t\ttoolbar.update()\n\t\tcanvas._tkcanvas.pack(side=\"top\", fill=\"both\", expand=1)\t\t\n\n\n\t\txRangeLabel=Label(root,text=\"X Range\")\n\t\txRangeLabel.pack()\t\t\n\t\n\t\treplotButton=Button(root, text=\"Replot\", command=self.replot)\n\t\treplotButton.pack()\n\t\n\t\tclearButton=Button(root,text=\"Clear Plot\", command=self.clearPlot)\n\t\tclearButton.pack(padx=20,pady=5)", "def __init__(self, min_height=600, min_width=600):\n self.window = Tk()\n # set minimum size to which the 
window can be reduced\n self.window.minsize(min_width, min_height)\n self.canvas = None\n self.frames = {\n \"parameters\": None,\n \"canvas\": None\n }\n self.menubar = {\n \"menubar\": None,\n \"helpmenu\": None,\n \"filemenu\": None,\n \"editmenu\": None\n }\n self.combo_box = {\n \"class\": None,\n \"variable\": None\n }\n self.init_canvas_frame()\n self.init_parameters_frame()\n # self.init_menu_bar()\n self.classes = {\n \"parameters\": Parameters(self),\n \"fractal\": FastFractal(self)\n }\n self.init_parameter_combobox()", "def invalidate_canvas(self):\n\n if self.window:\n x, y, w, h = self.get_allocation()\n self.window.invalidate_rect((0,0,w,h), False)\n self.cr = self.window.cairo_create()\n self.cr.update_layout(self.pg)", "def mock_dear_py_gui():\n def _gui_thread(self):\n while not self.stop:\n _ = self.process_data.get()\n\n BaseRealTimeVisualizer._gui_thread = _gui_thread\n BaseRealTimeVisualizer.should_close = lambda self: False", "def refresh_self(self) -> None:\n self._logger.debug(\"running\")\n try:\n self.figure.canvas.draw()\n except Exception as e:\n self._logger.exception(\"issue with drawing canvas.\")\n self._logger.debug(\"done\")", "def Draw(self, images={}, texts=None, side=TOP, size=None, full=None, \\\n funcs=None):\n print \"Widgets.myCanvas.Draw be called\"\n print '\\tmyCanvas.Draw: check the parameter...'\n if size:\n if type(size) != list or len(size) != 2:\n raise(\"Widgets.myCanvas.Draw:parameterError:\"\n \"size not a 2-list\")\n if images:\n if type(images) != dict:\n print \"images should be a list or None\"\n print \"Now the images is %s\" % repr(images)\n raise \"Widgets.myCanvas.Draw:parameterError\"\n for image in images:\n if repr(images[image])[: 32] != '<PIL.ImageTk.PhotoImage instance':\n print \"the instance in images should be\",\n print \"PIL.ImageTk.PhotoImage instance\"\n raise 'Widgets.myCanvas.Draw:parameterError:'\n for eachkey in images.keys():\n if eachkey.lower() not in ['a1', 'b1', 'a2', 'b2', 'full']:\n print \"images's key should be in ['a1', 'b1', 'a2', 'b2', 'full']\"\n raise 'Widgets.myCanvas.Draw:parameterError:'\n\n print \"\\tmyCanvas.Draw: try to make frame...\"\n if funcs: self.funcs = funcs\n self.updateImagesize = self.funcs[\"updateImagesize\"]\n master = self.master\n if texts:\n self.funcs[\"drawText\"](texts)\n if not size: \n size = [master.winfo_width()-7, master.winfo_height()-28]\n self.win_size = size\n if images: self.CanvasStatus['Draw_options']['images'] = images\n self.CanvasStatus['Draw_options'].update({\"side\": side, \"size\": size, \"full\": full, \\\n 'texts': texts})\n self.whether_full = full\n self.CanvasStatus[\"whether_full\"] = self.whether_full\n\n if \"frame1\" in dir(self): self.frame1.pack_forget()\n if \"frame2\" in dir(self): self.frame2.pack_forget()\n if \"frame_full\" in dir(self): self.frame_full.pack_forget()\n self.CanvasStatus[\"cavs_imgAlbum\"] = {}\n\n if not full:\n frame_size = [size[0], (size[1]-50) / 2]\n frames = {'1': '', '2': ''}\n for each_frame in frames:\n frames[each_frame] = self.makeFrame(master=master, size=frame_size)\n self.frame1 = frames['1']\n self.frame2 = frames['2']\n \n print \"\\tMyCanvas.Draw: try to make canvas...\"\n cell_size = [size[0] / 2, size[1] / 2]\n self.cell_size = cell_size\n canvases = {\"a1\": '', \"b1\": '', \"a2\": '', \"b2\": ''}\n for each_grid in canvases:\n if each_grid[1] == '1':\n the_master = frames[each_grid[1]]\n the_image = None\n the_text = None\n if each_grid in images: the_image = images[each_grid]\n if each_grid in texts: 
the_text = texts[each_grid]\n canvases[each_grid] = self.makeCanvas(\n master=the_master, size=cell_size, img=the_image, text=the_text\n )\n elif each_grid[1] == '2':\n the_master = frames[each_grid[1]]\n the_image = None\n the_text = None\n if each_grid in images: the_image = images[each_grid]\n if each_grid in texts: the_text = texts[each_grid]\n canvases[each_grid] = self.makeCanvas(\n master=the_master, size=cell_size, img=the_image, text=the_text\n )\n self.CanvasStatus.update({'canvases': canvases})\n elif full:\n if \"full\" in images and images[\"full\"]: pic = images[\"full\"]\n else:\n for img in images:\n if images[img]:\n pic = images[img]\n break\n text = None\n if \"full\" in texts:\n if texts[\"full\"]: text = texts[\"full\"]\n self.fullCanvas(img=pic, text=text)", "def main():\n PanelDemo().mainloop()", "def get_ticker_canvas(self, asset):\n # Generate a fresh canvas\n canvas = self.matrix.CreateFrameCanvas()\n canvas.Clear()\n\n # Create fonts for displaying prices\n font_symbol = graphics.Font()\n font_symbol.LoadFont('fonts/7x13.bdf')\n\n font_price = graphics.Font()\n font_price.LoadFont('fonts/6x12.bdf')\n\n font_change = graphics.Font()\n font_change.LoadFont('fonts/6x10.bdf')\n\n # To right align, we have to calculate the width of the text\n change_width = sum(\n [font_change.CharacterWidth(ord(c)) for c in asset['change_24h']]\n )\n change_x = 62 - change_width\n\n # Get colors\n main_color = graphics.Color(255, 255, 0)\n change_color = (\n graphics.Color(194, 24, 7)\n if asset['change_24h'].startswith('-')\n else graphics.Color(46, 139, 87)\n )\n\n # Load a smaller font to andle 6-figure asset prices\n if len(asset['price']) > 10:\n font_price.LoadFont('fonts/5x8.bdf')\n\n # Draw the elements on the canvas\n graphics.DrawText(canvas, font_symbol, 3, 12, main_color, asset['symbol'])\n graphics.DrawText(canvas, font_price, 3, 28, main_color, asset['price'])\n graphics.DrawText(\n canvas, font_change, change_x, 10, change_color, asset['change_24h']\n )\n\n return canvas", "def __init__(self, parent_frame, plt_props=None):\n tk.Frame.__init__(self, master=parent_frame)\n if self.matplotlib_ready():\n \"\"\" the import statements are scoped so make new ones\"\"\"\n import matplotlib\n import matplotlib.pyplot as plt\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\n\n self.figure_bed = plt.figure(figsize=(7, 3.5))\n self.axis = self.figure_bed.add_subplot(111)\n\n if plt_props:\n for key, value in plt_props.iteritems():\n eval(\"plt.\" + key + \"(\" + value + \")\")\n # self.axis.set_axis_bgcolor('red')\n self.figure_bed.set_facecolor('white')\n self.canvas = FigureCanvasTkAgg(self.figure_bed, master=self)\n self.canvas._tkcanvas.config(highlightthickness=0)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side='top')\n\n # self.make_matplotlib_area(parent, plt_props)\n self.embed_matplotlib()\n self.type = 'matplotlib'\n # TODO ADD TO THIS\n else:\n graph = tk.Canvas(master=self)\n graph.pack(side='left', expand=True, fill=tk.BOTH)\n self.type = 'canvas'", "def init_canvas(self, dirDicom):\n\n # Generate a widget that will house the canvas\n self.canvasMain = QWidget(self)\n self.canvasMain.move(0, 0)\n self.canvasMain.resize(770, 770)\n\n # Initialize the file names\n if not os.path.isdir(dirDicom):\n raise NotADirectoryError(f'Invalid directory: {dirDicom}')\n self.canvasMain.dirDicom = dirDicom\n\n # Create a box that acts as the container for the canvas\n self.canvasMain = DICOMcanvas(self.canvasMain)\n\n # Change focus to the widget\n 
self.canvasMain.setFocus()", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def __init__(self, parent, *args, **kwargs):\n tk.LabelFrame.__init__(self, parent, *args, **kwargs)\n self.canvas = MainCanvas(self, bg=\"orange\")\n self.canvas.pack(side='top', fill='both', expand=True)", "def initialize_graphics(self):\n self.renderer = vtk.vtkRenderer()\n self.window = vtk.vtkRenderWindow()\n self.window.AddRenderer(self.renderer)\n self.renderer.SetBackground(1.0, 1.0, 1.0)\n self.window.SetSize(1000, 1000)\n\n # Create a trackball interacter to transoform the geometry using the mouse.\n self.interactor = vtk.vtkRenderWindowInteractor()\n self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n self.interactor.SetRenderWindow(self.window)\n\n style = ClickInteractorStyle(self)\n self.interactor.SetInteractorStyle(style)\n style.SetCurrentRenderer(self.renderer)", "def __init__(self):\n self.figure = plt.figure()\n FigureCanvas.__init__(self, self.figure)\n self.figure.patch.set_facecolor('blue')\n self.figure.patch.set_alpha(0.0)\n self.pv_monitor = controls.PvMonitors.get_instance()", "def __init__(self, parent, maze_width, maze_height, scale=20):\n self.parent = parent\n self.parent.title(\"Maze Exploration Visualization\")\n\n self.maze_width = maze_width\n self.maze_height = maze_height\n self.scale = scale\n\n # Compute actual width and height\n self.width = maze_width * scale\n self.height = maze_height * scale\n\n # Store tkinter object\n self.frame = tkinter.Frame(self.parent,\n width=self.width,\n height=self.height,\n highlightthickness=1,\n highlightbackground=\"black\")\n self.canvas = tkinter.Canvas(self.frame,\n width=self.width, \n height=self.height)\n self.canvas.pack(expand=False)\n self.frame.pack(expand=False)\n\n # Initialize look of grid\n self.draw_gray_grid()\n\n self.person = None\n self.draw_person(self.maze_width // 2, self.maze_height // 2)", "def _draw_widget(self, *args) -> None:\n del args\n\n if self.canvas is None:\n return\n\n # TODO: allow user to set rotation/scale origin\n center = center_of_points_list(self.points)\n self.canvas.clear()\n\n with self.canvas:\n Color(*self.color)\n Scale(self.scale, origin=center)\n Rotate(angle=self.rotation, origin=center)\n KivyPoint(points=self.points,\n pointsize=self.pointsize)", "def display_object_on_canvas(self, tk_object, x, y):\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)", "def display_object_on_canvas(self, tk_object, x, y):\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)", "def display_object_on_canvas(self, tk_object, x, y):\n button1_window = self.canvas.create_window(\n x,\n y,\n anchor='nw',\n window=tk_object)", "def __init__(self, boss=None):\n Canvas.__init__(self, boss, width=SIZE, height=SIZE, bg=DEATH)\n self.make_grid()\n self.bind(\"<Button-1>\", self.draw)\n self.flag = 0\n # chart keeps track of deaths/births\n self.chart = {}\n for coord in INDICES:\n self.chart[coord] = DEAD\n self.state = StringVar() # these two variables are just for display\n self.state.set(\"AT REST\") # purposes later", "def draw(self):\n\n # Use update instead of 
update_idletasks because it works better\n # on some Windows machines.\n self.root.update()", "def startGUI(self):\n #cria uma nova janela chamada root com titulo\n self.root = Tk()\n self.root.title(\"Kalman Graphics\")\n\n #configura um frame na janela root\n mainframe = ttk.Frame(self.root, padding=\"0 0 0 0\")\n mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n mainframe.columnconfigure(0, weight=0)\n mainframe.rowconfigure(0, weight=0)\n\n #Acrescentando um plot\n self.initPlot()\n self.canvas1 = FigureCanvasTkAgg(self.myfig1, master=mainframe)\n self.canvas1.get_tk_widget().grid(column=1,row=1)\n \n #define variaveis que estarao na janela\n self.receivedMessage = StringVar(self.root)\n\n #define um label\n messageLabel = ttk.Label(mainframe, textvariable=self.receivedMessage)\n messageLabel.grid(column=1, row=2, sticky=(W, E))\n\n #para cada uma das janelas ou filhos do mainframe eu coloco um padding ao redor\n for child in mainframe.winfo_children():\n child.grid_configure(padx=0, pady=0)\n\n #schedule de uma funcao a cada 25ms\n self.root.after(10, self.processIncoming)\n\n #loop principal\n self.root.mainloop()", "def TestAnimation(self,event=None):\n wx.GetApp().Yield(True)\n Range = (-10,10)\n self.Range = Range\n\n self.UnBindAllMouseEvents()\n Canvas = self.Canvas\n Canvas.InitAll()\n\n ## Random tests of everything:\n colors = self.colors\n # Rectangles\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddRectangle(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Ellipses\n for i in range(3):\n xy = (random.uniform(Range[0],Range[1]), random.uniform(Range[0],Range[1]))\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n wh = (random.randint(1,5), random.randint(1,5) )\n Canvas.AddEllipse(xy, wh, LineWidth = lw, FillColor = colors[cf])\n\n # Circles\n for i in range(5):\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n D = random.randint(1,5)\n lw = random.randint(1,5)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddCircle(xy, D, LineWidth = lw, LineColor = colors[cl], FillColor = colors[cf])\n Canvas.AddText(\"Circle # %i\"%(i), xy, Size = 12, BackgroundColor = None, Position = \"cc\")\n\n # Lines\n for i in range(5):\n points = []\n for j in range(random.randint(2,10)):\n point = (random.randint(Range[0],Range[1]),random.randint(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,10)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddLine(points, LineWidth = lw, LineColor = colors[cl])\n\n # Polygons\n for i in range(3):\n points = []\n for j in range(random.randint(2,6)):\n point = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n points.append(point)\n lw = random.randint(1,6)\n cf = random.randint(0,len(colors)-1)\n cl = random.randint(0,len(colors)-1)\n Canvas.AddPolygon(points,\n LineWidth = lw,\n LineColor = colors[cl],\n FillColor = colors[cf],\n FillStyle = 'Solid')\n\n # Scaled Text\n String = \"Scaled text\"\n for i in range(3):\n ts = random.random()*3 + 0.2\n cf = random.randint(0,len(colors)-1)\n xy = (random.uniform(Range[0],Range[1]),random.uniform(Range[0],Range[1]))\n Canvas.AddScaledText(String, xy, Size = ts, Color = colors[cf], Position = \"cc\")\n\n\n # Now the Foreground Object:\n C = Canvas.AddCircle((0,0), 7, 
LineWidth = 2,LineColor = \"Black\",FillColor = \"Red\", InForeground = True)\n T = Canvas.AddScaledText(\"Click to Move\", (0,0), Size = 0.6, Position = 'cc', InForeground = True)\n C.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.MoveMe)\n C.Text = T\n\n self.Timer = wx.PyTimer(self.ShowFrame)\n self.FrameDelay = 50 # milliseconds\n\n Canvas.ZoomToBB()", "def __init__(self):\n\n # Width and height of the window, in pixels.\n self.width = 800\n self.height = 600\n width = self.width\n height = self.height\n\n # Create the root window.\n self.root = tkinter.Tk()\n root = self.root\n\n #\n # Buttons etc.\n #\n controls = tkinter.Frame(root)\n controls.pack(side=tkinter.TOP, fill='x')\n\n build = tkinter.Button(controls, text='Build new maze')\n build.pack(side=tkinter.LEFT)\n\n reset = tkinter.Button(controls, text='Reset maze')\n reset.pack(side=tkinter.LEFT)\n\n solve = tkinter.Button(controls, text='Solve maze')\n solve.pack(side=tkinter.LEFT)\n\n # maze_type: 0 = prim, 1 = random.\n maze_type = tkinter.IntVar()\n prim = tkinter.Radiobutton(controls, text='Prim', variable=maze_type, \n value=0)\n prim.pack(side=tkinter.LEFT)\n rand = tkinter.Radiobutton(controls, text='Random', variable=maze_type,\n value=1)\n rand.pack(side=tkinter.LEFT)\n prim.select()\n\n def lbl_entry(lbl, v):\n l = tkinter.Label(controls, text=\"{}: \".format(lbl))\n l.pack(side=tkinter.LEFT)\n e = tkinter.Entry(controls, textvariable=v, width=5)\n e.pack(side=tkinter.LEFT)\n\n # Maze size\n nrows_var = tkinter.StringVar()\n lbl_entry('Rows', nrows_var)\n ncols_var = tkinter.StringVar()\n lbl_entry('Columns', ncols_var)\n nrows_var.set('30')\n ncols_var.set('50')\n\n # Sparseness\n sparse = tkinter.StringVar()\n lbl_entry('Sparseness', sparse)\n sparse.set('.05')\n\n # Delay\n delay = tkinter.StringVar()\n lbl_entry('Draw delay (s)', delay)\n delay.set('0.0')\n\n #\n # Canvas in which to display the maze.\n #\n self.cvs = tkinter.Canvas(width=width, height=height)\n cvs = self.cvs\n cvs.pack(side=tkinter.TOP, expand=True, fill='both')\n\n # Build callback\n def build_act():\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = self.build_fn(nrows, ncols, sparseness)\n self.display_maze()\n build.configure(command=build_act)\n\n # Reset callback\n def reset_act():\n self.display_maze()\n reset.configure(command=reset_act)\n\n\n # Solve callback\n def solve_act():\n self.solve_maze(float(delay.get()))\n\n solve.configure(command=solve_act)\n\n # Prim callback\n def prim_act():\n self.build_fn = get_prebuilt_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_prebuilt_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n prim.configure(command=prim_act)\n\n # Random callback\n def random_act():\n self.build_fn = get_random_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_random_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n rand.configure(command=random_act)\n\n prim.invoke()\n\n root.mainloop()\n\n return", "def create_widgets(self):\n self.pack(fill=tk.BOTH, expand=True)\n self.top_frame = tk.Frame(self)\n self.top_frame.pack(fill=tk.X, expand=False)\n\n # Create obstacle button\n self.create_obstacle_button = tk.Button(\n self.top_frame,\n text=self.OBSTACLE_CREATION_INACTIVE_LABEL,\n command=self._toggle_creation_mode_cb\n )\n 
self.create_obstacle_button.pack(side=tk.LEFT)\n\n # Load button\n self.load_button = tk.Button(\n self.top_frame,\n text=self.LOAD_BUTTON_LABEL,\n command=self._load_button_cb\n )\n self.load_button.pack(side=tk.LEFT)\n\n # Export button\n export_button = tk.Button(\n self.top_frame,\n text=self.EXPORT_BUTTON_LABEL,\n command=self._export_button_cb\n )\n export_button.pack(side=tk.RIGHT)\n\n # Main canvas\n self.canvas = tk.Canvas(self, background='white')\n self.canvas.config(width=self.CANVAS_WIDTH, height=self.CANVAS_HEIGHT)\n self.canvas.bind('<ButtonRelease-1>', self._draw_line)\n self.canvas.pack(fill=tk.BOTH, expand=True)\n self.canvas.focus_set()", "def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()", "def draw_buttons(self): \n self.button_frame = Frame(self)\n\n # -- getting images\n prev_image = PhotoImage(file=self.directory + '/images/previous.png')\n prev_image = prev_image.subsample(10, 10) \n\n next_image = PhotoImage(file=self.directory + '/images/next.png')\n next_image = next_image.subsample(10, 10) \n \n # -- adding image to label\n prev_label = ttk.Label(self.button_frame, image = prev_image)\n next_label = ttk.Label(self.button_frame, image = next_image)\n\n prev_label.image = prev_image\n next_label.image = next_image\n\n # -- adding a twitter hide button\n self.twitter_hide = ttk.Button(self.button_frame, text='hide twitter')\n \n # -- adding the buttons to the frame \n prev_label.pack(side=RIGHT, padx=75) \n self.twitter_hide.pack(side=RIGHT, padx=200) \n next_label.pack(side=LEFT, padx=75)\n\n # -- adding bindings and commands\n prev_label.bind('<Button-1>', self.prev_article)\n next_label.bind('<Button-1>', self.next_article) \n self.twitter_hide.config(command=self.hide_twitter) \n\n # -- adding frame to canvas\n self.button_frame.pack(side=BOTTOM, fill=X)", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def canvas(self):\n return self._canvas", "def draw(self):\n self.figure.canvas.draw_idle()", "def __init__(self, *args, **kwargs):\r\n\r\n # ============================================= INITIALISATION ========================================\r\n\r\n if args==():\r\n self.master = Tk();\r\n self.isFrame = False #Permet l'importation de la fenetre en tant que frame\r\n self.master.title(\"PAP screen id %s\" %(random.randint(0, 1000)))\r\n else: #ou en tant que standalone.\r\n self.isFrame = True\r\n Frame.__init__(self, *args, **kwargs)\r\n self.frame = self\r\n self.master = args[0]\r\n print(\"hello\", self.frame)\r\n\r\n # =============================================== BINDING =============================================\r\n\r\n # Ce Bind est primordial afin de pouvoir centrer continuelement l'image\r\n # et ne pas étendre le canvas vers un côté précis.\r\n\r\n self.master.bind(\"<Configure>\", self.__reconfig__)\r\n\r\n\r\n # =============================================== CANVAS ==============================================\r\n\r\n self.canvas = Canvas(self.getRoot(), bg=CANVAS_BACKGROUND)\r\n self.canvas.grid(sticky = W+E+N+S)\r\n\r\n self.getRoot().grid_rowconfigure(0, weight=1)\r\n self.getRoot().grid_columnconfigure(0, weight=1)\r\n\r\n # =============================================== FULLSCREEN ==============================================\r\n\r\n self.is_fullscreen 
= False\r\n self.master.bind(\"<F11>\", self.toogle_fullscreen)\r\n # ================================================ ID =================================================\r\n\r\n Screen.__count__ += 1\r\n self.id = \"Screen_\"+str(Screen.__count__)", "def display_db(self, master, glycans, glycan_images, glycan_canvas):\n i = 0\n j = 0\n counter = 0\n for name in glycans.keys():\n # put five images per row\n if j and not j%5:\n i += 1\n j = 0\n units = glycans[name]['UNIT']\n root,tree,names = self.build_glycan_tree(units)\n fig = mpl.figure.Figure(figsize=(70./self.dpi, 70./self.dpi))\n ax = fig.add_subplot(111)\n \n self.myDrawer.draw_tree(tree, root, names, root_pos = [0, 0], direction = 1, ax = ax, axis = 0)\n ax.axis('equal')\n ax.axis('off')\n ax.set_ylim((-1, 6))\n ax.set_xlim((-3, 3))\n\n # Add to tk window\n figure_canvas_agg = FigureCanvasAgg(fig)\n figure_canvas_agg.draw()\n figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds\n figure_w, figure_h = int(figure_w), int(figure_h)\n canvas = tk.Canvas(master, width = 100, height = 100)\n glycan_image = tk.PhotoImage(master = canvas, width=figure_w, height=figure_h)\n canvas.create_image(figure_w/2, figure_h/2, image = glycan_image, tags = counter)\n canvas.bind(\"<Button-1>\", self.clicked_glycan)\n canvas.bind(\"<Double-Button-1>\", self.select_glycan)\n self.glycan_balloon.bind(canvas, 'Name: ' + name + '\\nStructure: ' + self.myDrawer.tree_to_text(tree, root, names, visited = []))\n tkagg.blit(glycan_image, figure_canvas_agg.get_renderer()._renderer, colormode=2)\n canvas.grid(column = j, row = i)\n glycan_images.append(glycan_image)\n glycan_canvas.append(canvas)\n j += 1\n counter += 1", "def __init__(self, parent, top, lmap):\n Canvas.__init__(self, parent, width=512, height=512)\n # Bind drag and drop events to canvas and pack it in mapcontainer\n self.bind('<ButtonPress-1>', self.grab)\n self.bind('<ButtonRelease-1>', self.drop)\n self.bind('<B1-Motion>', self.drag)\n self.pack(side='left', fill=BOTH, expand=1)\n\n self.xpos = 0 # X coord of mouse grab event\n self.ypos = 0 # Y coord of mouse grab event\n self.scale = 1 # Current zoom level\n self.im = None # Ref to original image, on which zoom is based\n self.original = None # image id, as first added to canvas\n self.zoomed = None # image id, as zoomed on canvas\n\n self.lmap = lmap\n self.drawMap(lmap)", "def YOURcanvas(name=\"icanvas\", size=(800, 600)):\n\n # Check if icanvas already exists\n canvas = ROOT.gROOT.FindObject(name)\n assert len(size) == 2\n if canvas:\n return canvas\n else:\n return ROOT.TCanvas(name, name, size[0], size[1])", "def draw(self):", "def clearCanvas():\n global c, coordinates\n c.delete(\"all\")\n drawMusicLines()\n coordinates.clear()", "def main(self):\n self.root.mainloop()" ]
[ "0.6882413", "0.67479193", "0.6690179", "0.64564663", "0.64564663", "0.64564663", "0.64194477", "0.6331927", "0.626784", "0.62565655", "0.61583567", "0.6147961", "0.6143228", "0.6132862", "0.6108119", "0.609335", "0.603408", "0.6019643", "0.59988374", "0.59641194", "0.59580326", "0.59578115", "0.5949998", "0.5949688", "0.5929907", "0.59198445", "0.59191287", "0.5915081", "0.59142154", "0.5901304", "0.5894316", "0.58823574", "0.5875158", "0.584496", "0.58424807", "0.5815879", "0.5808064", "0.58022916", "0.58006096", "0.57977706", "0.5791269", "0.57784563", "0.5770415", "0.5735039", "0.5714413", "0.56977046", "0.5680529", "0.56304556", "0.5630374", "0.5628494", "0.56200093", "0.5612814", "0.56109303", "0.55998725", "0.55964786", "0.55944085", "0.5590463", "0.55815595", "0.55579674", "0.5550223", "0.55473226", "0.554485", "0.55350333", "0.5497227", "0.5485177", "0.5482514", "0.54674625", "0.5447404", "0.5438161", "0.5437613", "0.54271", "0.54227406", "0.541956", "0.54085535", "0.54047865", "0.5404371", "0.54028547", "0.5402781", "0.5399318", "0.5389317", "0.5389317", "0.5389317", "0.5388083", "0.5386504", "0.53857", "0.5382549", "0.5373878", "0.53669906", "0.5366774", "0.5358783", "0.53564036", "0.5345115", "0.53355986", "0.53294855", "0.5328705", "0.5322098", "0.5322053", "0.5321422", "0.53179514", "0.5317308" ]
0.5550649
59
Test the AioBaseTurtle._calc_move function
def test_calc_move(self):
        t = AioBaseTurtle()
        t.speed(speed=5)
        steps, delta = t._calc_move(Vec2D(0, 100))
        self.assertEqual(steps, 20)
        self.assertAlmostEqual(delta[0], 0.0)
        self.assertAlmostEqual(delta[1], 5.0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()", "def DoMove(position, move):\n return position - move", "def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80", "def move():\n Robot.move()", "def test_change_direction(self):\n travelcalculator = TravelCalculator(50, 25)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(60)\n travelcalculator.start_travel(80)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n # change direction after two seconds\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 64\n travelcalculator.start_travel(48)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_UP\n\n assert travelcalculator.current_position() == 64\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.current_position() == 56\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000006.0\n assert travelcalculator.current_position() == 48\n assert travelcalculator.position_reached()", "def movement(self):", "def test_get_move_interface(self):\n h, w = 9, 9 # board size\n test_depth = 1\n starting_location = (2, 7)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n search_method = \"minimax\"\n heuristic = lambda g, p: 0. # return 0 everywhere\n\n # create a player agent & a game board\n agentUT = game_agent.CustomPlayer(\n test_depth, heuristic, iterative_search, search_method)\n\n # Test that get_move returns a legal choice on an empty game board\n board = isolation.Board(agentUT, 'null_agent', w, h)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on an \" +\n \"empty board. It should return coordinates on the \" +\n \"game board for the location of the agent's next \" +\n \"move. The move must be one of the legal moves on \" +\n \"the current game board.\"))\n\n # Test that get_move returns a legal choice for first move as player 2\n board = isolation.Board('null_agent', agentUT, w, h)\n board.apply_move(starting_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed making the first \" +\n \"move as player 2 on a new board. It should return \" +\n \"coordinates on the game board for the location \" +\n \"of the agent's next move. 
The move must be one \" +\n \"of the legal moves on the current game board.\"))\n\n # Test that get_move returns a legal choice after first move\n board = isolation.Board(agentUT, 'null_agent', w, h)\n board.apply_move(starting_location)\n board.apply_move(adversary_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on a \" +\n \"game in progress. It should return coordinates on\" +\n \"the game board for the location of the agent's \" +\n \"next move. The move must be one of the legal moves \" +\n \"on the current game board.\"))", "def move(x,y):\r\n pass", "def test_move_straight(controller):\n pos, angle = controller.odometry(20, 20, Vector2(0, 0), 0)\n assert pos == Vector2(\n 2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION,\n 0,\n )\n assert angle == 0\n\n # Move backward in a straight line.\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), math.pi / 2)\n assert pos.x < 1e-10\n assert pos.y == -2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION\n assert angle == math.pi / 2", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def step(self, move):", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. 
This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)", "def move(self, move):\n raise NotImplementedError()", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been 
updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def choose_move(self):\n return 0", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n 
break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def testMove(intCurrentLeftPin, intCurrentRightPin, fltXCurrent, fltYCurrent,\r\n fltXTestDistance, fltYTestDistance):\r\n fltXNew = fltXCurrent + fltXTestDistance\r\n fltYNew = fltYCurrent + fltYTestDistance\r\n \r\n printMovement(fltXCurrent, fltYCurrent, fltXNew, fltYNew)\r\n \r\n # Calculate the operations required to move the drawing point.\r\n lsOperations = calculatePath(fltXCurrent, fltYCurrent, fltXNew, fltYNew)\r\n \r\n # Execute the operations.\r\n tpCurrentState = executeOperations(lsOperations, intCurrentLeftPin,\r\n intCurrentRightPin,\r\n fltXCurrent, fltYCurrent)\r\n \r\n (intCurrentLeftPin, intCurrentRightPin,\r\n fltXCurrent, fltYCurrent) = tpCurrentState\r\n \r\n return (intCurrentLeftPin, intCurrentRightPin, fltXCurrent, fltYCurrent)", "def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + 
priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def test_move(self):\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERX)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n self.game._board[row][col] = PLAYERO\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERO)", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def move(self, direction, step):\n for i in range(1, step + 1):\n y, x = self.robot_position\n if direction == \"N\" and y > 0:\n if self.carte[y - 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y - 1, x)\n elif direction == \"S\" and y <= self.height:\n if self.carte[y + 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y + 1, x)\n elif direction == \"E\" and x <= self.width+1:\n if self.carte[y][x + 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x + 1)\n elif direction == \"O\" and x > 0:\n if self.carte[y][x - 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x - 1)\n\n if self.robot_position == self.out_position:\n print(\"Bravo vous avez fini\")\n return True\n\n return False", "async def test_move_relative_implementation(\n decoy: Decoy,\n movement: MovementHandler,\n) -> None:\n subject = MoveRelativeImplementation(movement=movement)\n data = MoveRelativeParams(\n pipetteId=\"pipette-id\",\n axis=MovementAxis.X,\n distance=42.0,\n )\n\n decoy.when(\n await movement.move_relative(\n pipette_id=\"pipette-id\",\n axis=MovementAxis.X,\n distance=42.0,\n )\n ).then_return(Point(x=1, y=2, z=3))\n\n result = await subject.execute(data)\n\n assert result == MoveRelativeResult(position=DeckPoint(x=1, y=2, z=3))", "def consume_move(self) :\n return math.ceil(math.sqrt(self.speed[0]**2 + self.speed[1]**2))", "def move(self):\n pass", "def player_movement(self):", "def test_basic_movement(self):\n with PhysicsEngineHarness('tests/only-sun.json') as physics_engine:\n # In this case, the only entity is the Sun. It starts at (0, 0)\n # with a speed of (1, -1). 
It should move.\n initial = physics_engine.get_state(1)\n moved = physics_engine.get_state(100)\n t0 = initial.timestamp\n t1 = moved.timestamp\n self.assertEqual(initial.timestamp, 1)\n self.assertAlmostEqual(initial[0].x, 0)\n self.assertAlmostEqual(initial[0].y, 0)\n self.assertAlmostEqual(initial[0].vx, 1)\n self.assertAlmostEqual(initial[0].vy, -1)\n self.assertEqual(moved.timestamp, t1)\n self.assertAlmostEqual(moved[0].x, t1 - t0)\n self.assertAlmostEqual(moved[0].y, -(t1 - t0))\n self.assertAlmostEqual(moved[0].vx, 1)\n self.assertAlmostEqual(moved[0].vy, -1)", "def test_calc_rotation(self):\n t = AioBaseTurtle()\n t.speed(speed=2)\n orient, steps, delta = t._calc_rotation(120)\n self.assertEqual(steps, 21)\n self.assertAlmostEqual(delta, 120.0 / 21.0)\n self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))\n self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))", "def makeMove(self, move, player):", "def test_move_onto_terrain(self):\n # move onto Water (1 extra)\n b1 = board.Board(self.small_ter)\n start = np.array((0, 3), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 2\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 2)\n\n # move onto Lava (4 extra)\n start = np.array((3, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 0\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 5)\n\n # move onto Barrier (illegal)\n start = np.array((1, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 1\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)\n\n # move onto Rock (illegal)\n start = np.array((1, 0), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 7\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)", "def decide_next_move(self):\n pass", "def _move(self, dx, dy):\n pass # must override in subclass", "def move(): #py:move\n RUR._move_()", "def move(self, algMove):\n if self.d_engine.is_move_correct(algMove):\n print(\"correct\")", "def test_game_move_negative():\n\n file=\"/home/unit_test_grids/test_game_grids.txt\"\n my_game=Game(file)\n \n result=my_game.move('s')\n \n assert my_game.listOfMoves==[],\"The move function of game is changing \"\\\n \"the initial list of moves when the\"\\\n \"configuration file does not exists\"\n \n \n assert my_game.numberOfMoves==0,\"The move function of game is changing \"\\\n \"the initial number of moves when the\"\\\n \"configuration file does not exists\"\n \n \n assert result==None,\"The move function is not working\"\\\n \"correctly when the configuration \"\\\n \"file does not exists\"", "def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def test_move():\n\n board = Board()\n\n # invalid moves: out of board boundaries\n assert board.move(board.P1, 100) is False\n assert board.move(board.P2, -2) is False\n\n print(board)\n\n # valid moves\n assert board.move(board.P1, 0) is True\n assert board.move(board.P2, 3) is True\n\n assert board.move(board.P1, 4) is True\n assert board.move(board.P2, 3) is 
True\n\n assert board.move(board.P1, 3) is True\n assert board.move(board.P2, 4) is True\n\n assert board.move(board.P1, 2) is True\n assert board.move(board.P2, 4) is True\n\n assert board.move(board.P1, 2) is True\n assert board.move(board.P2, 2) is True\n\n assert board.move(board.P1, 1) is True\n assert board.move(board.P2, 4) is True\n\n \"\"\"\n BEFORE:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \n AFTER:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|O|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|O|X|O|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|X|O|O|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|X|X|O|X|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \n \"\"\"", "def check4move(st, selected_unit, direction):\n return 1", "def test_move_over_terrain(self):\n # move over Water (0 extra)\n b1 = board.Board(self.small_ter)\n start = np.array((0, 1), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 1\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)\n #\n # move over Lava (0 extra)\n start = np.array((5, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 6\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)\n #\n # move over Barrier (illegal)\n start = np.array((2, 3), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 0\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)\n #\n # move over Rock (0 extra)\n start = np.array((2, 3), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 2\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 1)", "def test_get_move_valid(self, inputted_value):\n # Give 5\n column = self.game.get_move(\"Rob-E\")\n\n # Ensure 4 is returned\n self.assertEqual(column, 4)", "def test_03_edit_move(self):\n # Create/validate PO\n order = self.create_and_validate_po()\n\n # Edit move qty\n picking = order.picking_ids[0]\n move = picking.move_lines[0]\n self.assertEqual(move.product_uom_qty, 10)\n move.write({'product_uom_qty': '3'})\n self.assertEqual(move.product_uom_qty, 3)\n\n # Try to validate picking\n self.assertEqual(picking.state, 'assigned')\n with self.assertRaisesRegexp(exceptions.Warning, 'EDITED move'):\n picking.do_transfer()\n self.assertEqual(picking.state, 'assigned')", "def move_to(\n self, position, move_down=True, lifting=800, relative_move=False, clearance=0.0\n ):\n self.log.debug(\"Try moving table to {!s}\".format(position))\n if self.table_ready and not self.variables[\"table_is_moving\"]:\n # get me the current position\n old_pos = self.get_current_position()\n if (\n move_down\n ): # So only parent move orders are stored and not every height movement\n self.new_previous_position(old_pos)\n desired_pos = position[:]\n\n # If the table is somehow moving or reported an error before, so check if all errors have vanished\n # success = self.check_if_ready()\n\n # Move the table down if necessary\n if move_down:\n success = 
self.move_down(lifting)\n if not relative_move:\n desired_pos[2] -= lifting # To counter the down movement\n if not success:\n return False\n\n # Change the state of the table\n self.variables[\"table_is_moving\"] = True\n\n # Move the table to the position\n if relative_move:\n # list(np.array(old_pos)+np.array(desired_pos))\n move_command = self.build_command(\n self.device, (\"set_relative_move_to\", desired_pos)\n )\n else:\n move_command = self.build_command(\n self.device, (\"set_move_to\", desired_pos)\n )\n\n # Set axis\n self.set_axis([True, True, True])\n self.vcw.write(self.device, move_command)\n success = self.check_if_ready()\n if not success:\n return False\n self.set_axis([True, True, False])\n\n # State that the table is not moving anymore\n self.variables[\"table_is_moving\"] = False\n\n # Move the table back up again\n if move_down:\n success = self.move_up(lifting - clearance)\n position[\n 2\n ] -= clearance # Adapt the position to make sure the check works\n if not success:\n return False\n\n # Finally make sure the position is correct\n if relative_move:\n success = self.check_position([sum(x) for x in zip(old_pos, position)])\n if success:\n self.log.debug(\n \"Successfully moved table relative to {!s}\".format(position)\n )\n else:\n success = self.check_position(position)\n if success:\n self.log.debug(\"Successfully moved table to {!s}\".format(position))\n if not success:\n return False\n\n self.variables[\"table_is_moving\"] = False\n\n return True\n\n elif self.variables[\"table_is_moving\"]:\n self.log.warning(\n \"Table is currently moving, no new move order can be placed...\"\n )\n else:\n self.log.error(\n \"Table could not be moved due to an error. This usually happens if no table is connected to\"\n \" the setup\"\n )\n return False", "def test_move_initialization():\r\n m = Move('A1', 'B2')\r\n assert m.get_from_str() == 'A1'\r\n assert m.get_to_str() == 'B2'\r\n assert m.get_from_xy() == (7, 0)\r\n assert m.get_to_xy() == (6, 1)", "def move(self, action):\n \n self.counter += 1\n\n if action not in self.ACTIONS:\n raise Exception(\"Invalid action\")\n\n \n\n d_x, d_y = self.MOVEMENTS[action]\n x, y = self.position\n new_x, new_y = x + d_x, y + d_y\n new_X,new_Y=self.position_to_xy(new_x, new_y)\n \n\n if (new_x, new_y) not in self.cases:\n return self._get_state(), -3, False, self.ACTIONS\n \n \n \n elif (self.openGoal(new_x,new_y))&(new_X>-400):\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n \n return self._get_state(), 20, True, self.ACTIONS\n \n # elif not self.openGoal(new_x,new_y):\n # self.position = new_x, new_y\n # self.positionxy = self.position_to_xy(new_x, new_y)\n # return self._get_state(), -1, False, self.ACTIONS\n \n elif self.counter > 100:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, True, self.ACTIONS\n \n else:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, False, self.ACTIONS", "def move(self,dt):\n raise NotImplementedError(\"Robot.move\")", "def test_maze_move_5(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n 
old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.RIGHT)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def move(self):\n raise NotImplementedError", "def calMove(playerLocation, nextLocation):\n move_vector = tuple(np.subtract(nextLocation, playerLocation))\n for MOVE in DIRECTION_TO_CALCULATION:\n if move_vector == DIRECTION_TO_CALCULATION[MOVE]:\n return MOVE\n return \"Not right\"", "def AeroMove(self, pos):\r\n\r\n pass", "def move():\n # step 1 of task analysis: get data\n data = get_data('MovementData/Walking_02.txt')\n # step 2: get the initial orientation of the sensor\n sensor_orientation = get_init_orientation_sensor(data.acc[0])\n # step 3: get the vector of the right horizontal semi-circular canal's on-direction\n rhscc_init_on_dir = get_init_on_dir_rh_scc(15)\n # preparation for step 4: align the angular velocity sensor data with the global coordinate system\n angular_velocities_aligned_globally = align_sensor_data_globally(data.omega, sensor_orientation)\n # step 4: calculate the stimulation of the cupula\n stimuli = get_scc_stimulation(angular_velocities_aligned_globally, rhscc_init_on_dir)\n # step 5: get the transfer function of the scc with the dynamics provided in the lecture\n scc_trans_fun = get_scc_transfer_fun(0.01, 5)\n # step 6: get the cupular deflection\n max_cupular_deflection = calculate_max_cupular_deflection(scc_trans_fun, stimuli, data.rate)\n # preparation for step 7: align the acceleration sensor data with the global coordinate system\n accelerations_aligned_globally = align_sensor_data_globally(data.acc, sensor_orientation)\n # step 8: calculate the maxmimum left- and rightwards stimulation of the otolithic organ\n max_left_right_stimuli = calculate_otolithic_max_stimuli(accelerations_aligned_globally, 1)\n # step 9: calculate the head orientation\n head_orientations = calculate_head_orientation(angular_velocities_aligned_globally, data.rate)\n\n 
return max_cupular_deflection, max_left_right_stimuli, head_orientations", "def test_travel_up_with_updates(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(70)\n travelcalculator.start_travel(50) # 10 seconds to reach 50\n\n mock_time.return_value = 1580000005.0\n assert travelcalculator.current_position() == 60\n assert not travelcalculator.position_reached()\n # update from bus not matching calculation takes precedence (1 second faster)\n travelcalculator.update_position(58)\n assert travelcalculator.current_position() == 58\n assert not travelcalculator.position_reached()\n # position reached 1 second earlier than predicted\n mock_time.return_value = 1580000010.0 - 1\n assert travelcalculator.current_position() == 50\n assert travelcalculator.position_reached()", "def CheckMove(self,move):\n\t\tif(move=='w'):\n\t\t\tif(self.x==0):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='s'):\n\t\t\tif(self.x==15):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='d'):\n\t\t\tif(self.y==35):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='a'):\n\t\t\tif(self.y==0):\n\t\t\t\treturn 0\n\t\t\treturn 1", "def test_findDirection_6(self):\n startCoordinate = coordinate.Coordinate(5, 5)\n endCoordinate = coordinate.Coordinate(3, 3)\n expected_result = 6\n actual_result = rules.findDirection(startCoordinate, endCoordinate)\n self.assertEqual(actual_result, expected_result)", "def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])", "def move_turtle(self):\n self.forward(self.move_speed)", "def _get_movement(self):\n raise NotImplementedError", "def makeMove(self, movable_statement):\n ### Student code goes here\n tile = str(movable_statement.terms[0])\n fromx = str(movable_statement.terms[1])\n fromy = str(movable_statement.terms[2])\n tox = str(movable_statement.terms[3])\n toy = str(movable_statement.terms[4])\n self.kb.kb_retract(parse_input('fact: (pos ' + tile + ' ' + fromx + ' ' + fromy + ')'))\n self.kb.kb_retract(parse_input('fact: (pos empty ' + tox + ' ' + toy + ')'))\n self.kb.kb_assert(parse_input('fact: (pos ' + tile + ' ' + tox + ' ' + toy + ')'))\n self.kb.kb_assert(parse_input('fact: (pos empty ' + fromx + ' ' + fromy + ')'))", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n 
self.assertEqual(rover.get_heading, 'W')", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def __check_move(self):\n move = self.communications.get_move()\n if move is not None and move in self.bot.movements:\n self.communications.set_status(\"Moving Bot {}\".format(move))\n self.make_move(move)\n\n self.communications.send_proximity_data(self.proximity_sensors.read_sensors())", "def move(self):\r\n if self.last_op_move is None:\r\n return rockyman.move(self)\r\n else:\r\n return self.last_op_move", "def move(self, t, s):\n raise NotImplementedError", "def test_move(self):\n # Shouldn't be able to run a move with this sampler\n self.assertRaises(NotImplementedError, lambda: gcmc_sphere_sampler.move(gcmc_sphere_simulation.context))\n\n return None", "def test_human_moves_number_of_squares_according_to_speed(mock_random):\n mock_random.randint.return_value = 6\n human = Human(speed=3)\n coordinates = [0, 0]\n grid_dimensions = 
[3, 3]\n assert human.move(coordinates, grid_dimensions) == [3, 0]", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def test_travel_down_with_updates(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(40)\n travelcalculator.start_travel(100) # 15 seconds to reach 100\n\n # time not changed, still at beginning\n assert travelcalculator.current_position() == 40\n assert not travelcalculator.position_reached()\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 48\n assert not travelcalculator.position_reached()\n # update from bus matching calculation\n travelcalculator.update_position(48)\n assert travelcalculator.current_position() == 48\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000010.0\n assert travelcalculator.current_position() == 80\n assert not travelcalculator.position_reached()\n # update from bus not matching calculation takes precedence (1 second slower)\n travelcalculator.update_position(76)\n assert travelcalculator.current_position() == 76\n assert not travelcalculator.position_reached()\n # travel time extended by 1 second due to update from bus\n mock_time.return_value = 1580000015.0\n assert travelcalculator.current_position() == 96\n assert not travelcalculator.position_reached()\n mock_time.return_value = 1580000015.0 + 1\n assert travelcalculator.current_position() == 100\n assert travelcalculator.position_reached()", "def test_move_along(self):\n\n global sendPlayCallParams\n \n req = self.get_moves(5)\n\n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n \n \n with patch('src.drivers.hyundai_robot.sendPlay', side_effect = mock_send_play) as m, \\\n patch.object(UdpConnector, 'appendToQueue') as u:\n src.drivers.hyundai_robot.move_along(req)\n\n assert m.called\n assert m.call_count == 1\n assert sendPlayCallParams['start'] == 1\n assert sendPlayCallParams['end'] == -1\n assert sendPlayCallParams['direction'] == 1\n assert sendPlayCallParams['poses'] == None\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == 
\"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def move(self, pos_to_move):\n if (self.current_pos + 1) - pos_to_move == 1: #Move to the left\n direction = 4\n elif (self.current_pos + 1) - pos_to_move == -1: #Move to the right\n direction = 2\n elif (self.current_pos + 1) - pos_to_move == 5: #Move to the top\n direction = 1\n else: #Move to the bottom\n direction = 3\n return [MOVE, direction]", "def test_object_move(self):\n self.assertTrue(self.obj1 in self.room1.contents)\n # use move_to hook\n self.obj1.move_to(self.room2)\n self.assertFalse(self.obj1 in self.room1.contents)\n self.assertTrue(self.obj1 in self.room2.contents)\n\n # move back via direct setting of .location\n self.obj1.location = self.room1\n self.assertTrue(self.obj1 in self.room1.contents)\n self.assertFalse(self.obj1 in self.room2.contents)", "def test_pos_1024() -> None:\n assert sw.walk_to(1024).distance == 31", "def _move_tetrino(self, tetrino, x, y):\n tetrino.location_offset[constant.X] += x\n tetrino.location_offset[constant.Y] += y\n tetrino.update_location()", "def move(self, coordinates, direction):\n pass", "def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def move(self, action):\n ligne = self.location_[0] + self.actions_[action][0]\n column = self.location_[1] + self.actions_[action][1]\n newLocation = (ligne, column)\n self.location_ = newLocation\n newState = (self.location_[0] * self.width ) + self.location_[1]\n\n if self.location_[0] == 0 and self.location_[0] == 0:\n return 0\n\n return newState", "def move(self, direction):\n\n self.direction = direction\n self.logger.debug('current direction: ' + direction)\n\n #remember axis name that instrument thinks in\n if 'Z' in self.current_axis:\n axis_string = 'ZPiezoStepper'\n else:\n if self.direction == 'left' or self.direction == 'right':\n axis_string = 'XPiezoStepper'\n else:\n axis_string = 'YPiezoStepper'\n\n if self.current_move == 'move absolute':\n #combine the spinbox and unit combobox user input to a pint quantity\n self.logger.info('moving to an absolute position')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n\n self.logger.debug('axis: ' + axis_string)\n local_distance = ur(str(distance) + unit)\n self.logger.debug('to position: ' + str(local_distance))\n\n self.moving_thread = 
WorkThread(self.anc350_instrument.move_to,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'move relative':\n # combine the spinbox and unit combobox user input to a pint quantity\n # add minussign to communicate correct direction to instrument\n self.logger.info('moving relative')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n self.logger.debug('axis:' + axis_string)\n self.logger.debug('direction: '+ direction)\n\n if self.direction == 'right' or self.direction == 'up':\n local_distance = ur(str(distance) + unit)\n self.logger.debug(str(local_distance))\n elif self.direction == 'left' or self.direction == 'down':\n local_distance = ur(str(-1 * distance) + unit)\n self.logger.debug(str(local_distance))\n\n self.moving_thread = WorkThread(self.anc350_instrument.move_relative,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'continuous' or self.current_move == 'step':\n # convert direction buttons clicked to direction integers that instrument wants\n # than move for 1s continuously, since the stop button doesnt work yet\n if self.direction == 'left':\n if 'Z' in self.current_axis:\n direction_int = 0 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 1\n elif self.direction == 'right':\n if 'Z' in self.current_axis:\n direction_int = 1 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 0\n elif self.direction == 'up':\n direction_int = 0\n elif self.direction == 'down':\n direction_int = 1\n\n if self.current_move == 'continuous':\n self.logger.info('moving continuously')\n self.moving_thread = WorkThread(self.anc350_instrument.move_continuous, axis_string, direction_int)\n self.moving_thread.start()\n\n elif self.current_move == 'step':\n self.logger.info('making a step')\n self.anc350_instrument.given_step(axis_string, direction_int, 1)", "def test_move_between(self):\n\n global sendPlayCallParams\n\n req = self.get_moves(50)\n\n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n\n with patch('src.drivers.hyundai_robot.sendPlay', side_effect = mock_send_play) as m, \\\n patch.object(UdpConnector, 'appendToQueue') as u:\n \n src.drivers.hyundai_robot.allPositions = []\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))\n assert u.called == False\n\n src.drivers.hyundai_robot.store_poses(req)\n assert u.call_count == math.ceil( len(req.moves) / src.drivers.hyundai_robot.batchSize )\n\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))\n \n assert sendPlayCallParams['start'] == 3\n assert sendPlayCallParams['end'] == 4\n assert sendPlayCallParams['direction'] == 1\n assert sendPlayCallParams['poses'] == None\n assert m.called\n\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 3, end = 1 ))\n \n assert sendPlayCallParams['start'] == 4\n assert sendPlayCallParams['end'] == 2\n assert sendPlayCallParams['direction'] == -1\n assert sendPlayCallParams['poses'] == None\n assert m.call_count == 2\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def test_check_move_with_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] + 
[\" \"] * 5,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 3)\n self.assertTrue(valid)", "def move_me(self):\r\n\t\t#self.start_pos = self.rect.center\t\t\t\r\n\t\tif self.goal_pos is not None:\r\n\t\t\tprint(f'goal_pos: {self.goal_pos}, start_pos: {self.start_pos}')\r\n\t\t\tdx = self.goal_pos[0] - self.start_pos[0]\r\n\t\t\tdy = self.goal_pos[1] - self.start_pos[1]\r\n\r\n\t\t\tdistance = math.sqrt(dx*dx + dy*dy)\r\n\t\t\tself.shift += self.speed\r\n\r\n\t\ttry:\r\n\t\t\tif self.shift/distance < 0.99:\r\n\t\t\t\tself.rect.center = (self.start_pos[0] + self.shift/distance * dx,\r\n\t\t\t\t\t\t\t\t\t self.start_pos[1] + self.shift/distance * dy)\r\n\t\t\t\tprint(f'going to: {self.goal_pos}')\r\n\t\texcept ZeroDivisionError:\r\n\t\t\t\tpass\t\r\n\t\treturn True", "def move(self, position, direction):\n i, j = position\n direction %= 360\n if direction == 0:\n return (i - 1, j)\n if direction == 90:\n return (i, j + 1)\n if direction == 180:\n return (i + 1, j)\n if direction == 270:\n return (i, j - 1)\n raise ValueError(f\"Maze.move called with bad angle = {direction}\")", "def test_is_traveling(self):\n travelcalculator = TravelCalculator(25, 50)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()\n\n travelcalculator.set_position(80)\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()\n\n mock_time.return_value = 1580000000.0\n travelcalculator.start_travel_down()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.is_traveling()\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000005.0\n assert not travelcalculator.is_traveling()\n assert travelcalculator.position_reached()", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def test_snake_snake_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n state = game.move(GameMoves.DOWN)\n self.assertEqual(state, LevelState.RUNNING)\n state = game.move(GameMoves.LEFT)\n self.assertEqual(state, LevelState.RUNNING)\n state= game.move(GameMoves.UP)\n self.assertEqual(state, LevelState.LOSE)", "def move(self, direction):\n pass", "def test_did_not_move(controller):\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), 0)\n assert pos == Vector2(0, 0)\n assert angle == 0", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... \",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()" ]
[ "0.77467954", "0.67991954", "0.67526376", "0.6682421", "0.6611246", "0.66024905", "0.65450394", "0.65094215", "0.6458481", "0.6436995", "0.6424645", "0.64221406", "0.6376048", "0.63608104", "0.6327564", "0.63082105", "0.6292444", "0.62719584", "0.62709236", "0.62575185", "0.6252183", "0.62429845", "0.62324667", "0.6226187", "0.6206077", "0.61917627", "0.6178722", "0.6163082", "0.6157391", "0.61289006", "0.61185235", "0.61011845", "0.60983074", "0.60979474", "0.6082796", "0.6068275", "0.6066938", "0.6064281", "0.6053225", "0.60494673", "0.6042366", "0.604088", "0.6038469", "0.6021296", "0.6016971", "0.6014646", "0.60000646", "0.5991739", "0.5989117", "0.5986874", "0.5973047", "0.5972568", "0.59696966", "0.596747", "0.5962015", "0.5961629", "0.59580773", "0.59562737", "0.59539014", "0.59411824", "0.5933918", "0.59270066", "0.592695", "0.59222794", "0.5921619", "0.5906961", "0.59038997", "0.58976686", "0.5896139", "0.5896139", "0.58931047", "0.5893073", "0.58864427", "0.5884325", "0.5882541", "0.58787405", "0.58774126", "0.587221", "0.58702224", "0.5869317", "0.58641547", "0.586172", "0.5861543", "0.5859254", "0.5857692", "0.5850192", "0.5843519", "0.58374155", "0.58281606", "0.5824883", "0.5823004", "0.5819503", "0.580741", "0.57989824", "0.5791895", "0.57897276", "0.57882154", "0.57863873", "0.57811326", "0.5781081" ]
0.88943046
0
Test the AioBaseTurtle._calc_rotation function
def test_calc_rotation(self):
 t = AioBaseTurtle()
 t.speed(speed=2)
 # at speed 2, a 120 degree turn is split into 21 animation steps
 orient, steps, delta = t._calc_rotation(120)
 self.assertEqual(steps, 21)
 # delta is the per-step angle increment
 self.assertAlmostEqual(delta, 120.0 / 21.0)
 # orient is the unit heading vector after the full 120 degree turn
 self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))
 self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rotation_angle(self):\n\n self.test_shape.azimuth_placement_angle = [45, 135, 225, 315]\n test_volume = self.test_shape.volume()\n self.test_shape.rotation_angle = 180\n assert self.test_shape.volume() == pytest.approx(test_volume * 0.5)", "def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)", "def test_rotate(self):\n rotable = TestRotable()\n command = RotateCommand(rotable)\n collinear_to_new_direction = rotable.get_direction() + rotable.get_angular_velocity()\n\n command()\n\n ratio = norm(rotable.get_direction()) / norm(collinear_to_new_direction)\n self.assertTrue(allclose(collinear_to_new_direction * ratio, rotable.get_direction()))\n self.assertTrue(isclose(norm(rotable.get_direction()), 1))", "def test_rotated(self):\n self._calibration_test(\"rotated\")", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)", "def test_arbitrary_rotation(self):\n \n # This test is run a bunch of times on various intervals, ranging from 50% to 1/6\n\t\t# (16.667%).\n for i in range(2, 7):\n \n interval = 1 / i # The amount to increase each qubit's probability by, relative to the previous qubit\n step_string = \"{:.4f}\".format(100 / i) # The decimal representation of the interval, as a percent\n target_probabilities = [0] * (i + 1) # This will store the desired probabilities of each qubit\n for j in range(0, i + 1):\n target_probability = j * interval\n target_probabilities[j] = target_probability\n\n # Run the test\n self.run_test(self.arbitrary_rotation_function, f\"Rotation with steps of 1/{i} ({step_string}%)\", 2000, target_probabilities, 0.05)", "def test_calculate_angle():\n r1 = np.array([0, 0, -1])\n r2 = np.array([0, 0, 0])\n r3 = np.array([1, 0, 0])\n\n expected_angle = 90\n calculated_angle = 
molecool.calculate_angle(r1, r2, r3, degrees = True)\n\n assert expected_angle == calculated_angle", "def test_extract_rot_angle():\n v = np.zeros((4,2))\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Zero velocities not allowed.\"\n \n v[:,1] = 1.\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Failed to get both forward and backward directions.\"\n\n # Forwards-backwards motion.\n v[:,1] = 0.\n v[:2,0] = -1.1\n v[2:,0] = 1.2\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,np.pi)\n\n # Forwards-backwards motion.\n v[:,0] = 0.\n v[:2,1] = -.9\n v[2:,1] = .8\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2)\n\n # Forwards-backwards motion with noise.\n v[:2,1] += (np.random.rand(2)*2-1)/10\n v[2:,1] += (np.random.rand(2)*2-1)/10\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2,atol=.1)", "def test_from_rotation_angle_coordinate_of_phi(rotationangle):\n\n # Get the coordinate at phi\n phi_dash = rotationangle[\"phi\"]\n c3 = rotationangle[\"cs\"].from_rotation_angle(phi_dash)\n\n # Ensure that it is at the origin\n assert c3 == pytest.approx(0.0)", "def test_skel_rotation(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1, aef=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertAlmostEqual(0.0, values[-1])", "def test_calc_vector_rotation(time_location, moon_time_location, telescope_frame):\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n ra=Longitude(12.0 * units.hr),\n dec=Latitude(-30.0 * units.deg),\n frame=\"icrs\",\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n coherency_rotation = np.squeeze(source._calc_coherency_rotation())\n\n assert np.isclose(np.linalg.det(coherency_rotation), 1)", "def test_rotation_angle_warning(self):\n\n def warning_trigger():\n try:\n paramak.CenterColumnStudyReactor(\n inner_bore_radial_thickness=20,\n inboard_tf_leg_radial_thickness=50,\n center_column_shield_radial_thickness_mid=50,\n center_column_shield_radial_thickness_upper=100,\n inboard_firstwall_radial_thickness=20,\n divertor_radial_thickness=100,\n inner_plasma_gap_radial_thickness=80,\n plasma_radial_thickness=200,\n outer_plasma_gap_radial_thickness=90,\n # first number must be between plasma inner/outer radius\n plasma_high_point=(245, 240),\n plasma_gap_vertical_thickness=40,\n center_column_arc_vertical_thickness=520,\n rotation_angle=360)\n\n except BaseException:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n warning_trigger()\n assert len(w) == 1\n assert issubclass(w[-1].category, UserWarning)\n assert \"360 degree rotation may result in a Standard_ConstructionError or AttributeError\" in str(\n w[-1].message)", "def test_str_rotation_angle(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"rotation_angle\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x2D,\n 0xDC,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 11740)\n self.assertEqual(sensor.unit_of_measurement(), \"°\")\n self.assertEqual(sensor.ha_device_class(), None)", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 
3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)", "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def get_rot_dtdt(self) -> WAQuaternion:\n pass", "def test_rot(self):\n\n print(\"rot()\")\n obs = self.fixture\n\n # rotation(0) = identity\n for axis in [1, 2, 3]:\n # theta = 0.0\n rotation = obs.rot(0.0, axis)\n # find || eye - rot1 ||\n diff = np.linalg.norm(np.eye(3) - rotation)\n self.assertAlmostEqual(diff, 0.0, delta=1e-12)\n # theta = 2*pi\n rotation = obs.rot(2.0 * np.pi, axis)\n # find || eye - rot1 ||\n diff = np.linalg.norm(np.eye(3) - rotation)\n self.assertAlmostEqual(diff, 0.0, delta=1e-12)\n\n # perform many randomized tests\n num_tests = 100\n num_products = 10\n for _test_counter in range(num_tests):\n thetas = []\n axes = []\n base = np.eye(3)\n # we will multiply a series of rotations into \"base\"\n rot_all = base\n for _rot_counter in range(num_products):\n theta 
= np.random.uniform(2 * np.pi) # in [0,2 pi]\n axis = np.random.randint(3) + 1 # in {1,2,3}\n axes.append(axis)\n thetas.append(theta)\n rotation = obs.rot(theta, axis)\n # multiply rot1 into the cumulative rotation\n rot_all = np.dot(rot_all, rotation)\n # now, back all the rotations out\n for _rot_counter in range(num_products):\n theta = thetas.pop()\n axis = axes.pop()\n # apply the inverse rotation\n rotation = obs.rot(-theta, axis)\n rot_all = np.dot(rot_all, rotation)\n # find || base - rot1 * rot2 ||\n diff = np.linalg.norm(base - rot_all)\n self.assertAlmostEqual(diff, 0.0, delta=1e-10 * num_products)", "def comp_rot_dir(self):\n\n MMF = self.comp_mmf_unit()\n p = self.get_pole_pair_number()\n\n # Compute rotation direction from unit mmf\n results = MMF.get_harmonics(1, \"freqs\", \"wavenumber\")\n H1 = results[MMF.symbol]\n\n return sign(H1[0])", "def make_rotation(self, rotation):\n if rotation == \"r\":\n self.facing += 1\n else:\n self.facing -= 1\n\n if self.facing > 3:\n self.facing = self.facing - 4\n elif self.facing < 0:\n self.facing = self.facing + 4", "def rotation(self, *args, **kwargs) -> Any:\n pass", "def _rotate(self, tetrino):\n tetrino.rotate()", "def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360", "def test_rotate_down(self):\n # Testing 'down' rotation clockwise\n side = 'D'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['r', 'r']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['g', 'g']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['o', 'o']], dtype='<U1'),\n np.array([['r', 'r'], ['b', 'b']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def rotation_angle(self):\n return self.container['rotation_angle']", "def test_rotate_without_moving(controller):\n distance = math.pi / 2 * (DISTANCE_BETWEEN_WHEELS / 2)\n revolution = distance / (2 * math.pi * WHEEL_RADIUS)\n ticks = revolution * TICK_PER_REVOLUTION\n pos, angle = controller.odometry(\n round(10 - ticks),\n round(10 + ticks),\n Vector2(0, 0),\n 0,\n )\n\n # Rotate 90 degrees without moving.\n assert pos == Vector2(0, 0)\n assert round(math.pi / 2 / angle, 1) == 1\n\n # Rotate back to 0 degrees without moving.\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), 0)\n assert pos == Vector2(0, 0)\n assert round(-math.pi / 2 / angle, 1) == 1", "def test_calc_circle(self):\n t = AioBaseTurtle()\n steps, step_len, rot_step = t._calc_circle(100, extent=180)\n self.assertEqual(steps, 14)\n self.assertAlmostEqual(rot_step, 180.0 / 14.0)\n self.assertAlmostEqual(step_len, 22.3928952207)", "def steps_to_angle():\n pass", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def get_rot(self) -> WAQuaternion:\n pass", "def test_vectors_angle(self):\n\n # Example 1.3\n vector_p = np.array([1.0, 2.0, 0.0])\n vector_q = np.array([3.0, 1.0, 1.0])\n crystal = 
crystal_system.Tetragonal(0.5, 1.0)\n angle_ref_deg = 53.300774799510123\n\n angle_rad = vector.angle_rad(crystal, vector_p, vector_q)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n angle_rad = vector.angle_rad(crystal, vector_q, vector_p)\n angle_deg = np.degrees(angle_rad)\n self.assertAlmostEqual(angle_ref_deg, angle_deg, 6)\n\n #self.fail(\"Test if the testcase is working.\")", "def test_comp_angle_opening(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n a = test_obj.slot.comp_angle_opening()\n self.assertEqual(a, 2 * pi / test_obj.slot.Zs)\n\n b = comp_angle_opening(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def get_rot(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation()\n\n return rot", "def rotate(angle):\n global odom_list\n global pose\n\n #This node was created using Coordinate system transforms and numpy arrays.\n #The goal is measured in the turtlebot's frame, transformed to the odom.frame \n transformer = tf.TransformerROS()\t\n rotation = numpy.array([[math.cos(angle), -math.sin(angle), 0],\t#Create goal rotation\n [math.sin(angle), math.cos(angle), 0],\n [0, 0, 1]])\n\n #Get transforms for frames\n odom_list.waitForTransform('odom', 'base_footprint', rospy.Time(0), rospy.Duration(4.0))\n (trans, rot) = odom_list.lookupTransform('odom', 'base_footprint', rospy.Time(0))\n T_o_t = transformer.fromTranslationRotation(trans, rot)\n R_o_t = T_o_t[0:3,0:3]\n\n #Setup goal matrix\n goal_rot = numpy.dot(rotation, R_o_t)\n goal_o = numpy.array([[goal_rot[0,0], goal_rot[0,1], goal_rot[0,2], T_o_t[0,3]],\n [goal_rot[1,0], goal_rot[1,1], goal_rot[1,2], T_o_t[1,3]],\n [goal_rot[2,0], goal_rot[2,1], goal_rot[2,2], T_o_t[2,3]],\n [0, 0, 0, 1]])\n\n #Continues creating and matching coordinate transforms.\n done = False\n while (not done and not rospy.is_shutdown()):\n (trans, rot) = odom_list.lookupTransform('odom', 'base_footprint', rospy.Time(0))\n state = transformer.fromTranslationRotation(trans, rot)\n within_tolerance = abs((state - goal_o)) < .2\n if ( within_tolerance.all() ):\n spinWheels(0,0,0)\n done = True\n else:\n if (angle > 0):\n spinWheels(3,-3,.1)\n else:\n spinWheels(-3,3,.1)", "def test_rotate_right(self):\n # Testing 'down' rotation clockwise\n side = 'R'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['g', 'g']], dtype='<U1'),\n np.array([['y', 'o'], ['y', 'o']], dtype='<U1'),\n np.array([['o', 'w'], ['o', 'w']], dtype='<U1'),\n np.array([['w', 'r'], ['w', 'r']], dtype='<U1'),\n np.array([['b', 'b'], ['b', 'b']], dtype='<U1'),\n np.array([['y', 'r'], ['y', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def test_xform_rotation(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.xform_file, ani=1, aef=1)\n\n values = cmds.keyframe('pCube1.rx', q=1, vc=1)\n self.assertAlmostEqual(0.0, values[-1])", "def test_d_2():\n rs = 10\n d = 2\n np.random.seed(rs)\n num = 3\n theta = 
np.random.uniform(0, 2 * math.pi)\n rotation = np.identity(d)\n\n rotation[0, 0] = math.cos(theta)\n rotation[0, 1] = - math.sin(theta)\n rotation[1, 0] = math.sin(theta)\n rotation[1, 1] = math.cos(theta)\n\n np.random.seed(rs)\n rotation_function = mt_obj.calculate_rotation_matrix(d, num)\n assert(np.all(rotation == rotation_function))", "def test_helioviewer_rotation(lasco, lasco_helioviewer):\n np.testing.assert_allclose(lasco.rotation_matrix,\n [[0.999966, -0.008296], [0.008296, 0.999966]], rtol=1e-6)\n np.testing.assert_array_equal(lasco_helioviewer.rotation_matrix, [[1., 0.], [0., 1.]])", "def test_skel_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def calc_tube_angle(r_min, r_max, tube_height):\n x = r_max - r_min\n hyp = tube_height\n rotor_angle = np.rad2deg(np.arcsin(x / hyp))\n return rotor_angle", "def test_calculate_tilt(tilt_reference):\n tilt_rad = np.radians(tilt_reference)\n # get the rotation axis\n # NOTE:\n # we need to tilt the image -tilt in order to get the tilt angle as + value.\n rot_aixs_tilted = get_tilted_rot_axis(tilt_inplane=-tilt_rad, tilt_outplane=0.0)\n # radiograph at 0 deg\n img0 = virtual_cam(two_sphere_system(0, rot_aixs_tilted, size=200))\n # radiograph at 180 deg\n img180 = virtual_cam(two_sphere_system(np.pi, rot_aixs_tilted, size=200))\n # calculate the tilt angle\n tilt_angle = calculate_tilt(img0, img180).x\n # verify\n # NOTE: tolerance is set to half a pixel at the edge of the FOV\n np.testing.assert_allclose(tilt_angle, tilt_reference, atol=np.degrees(0.5 / 100))", "def determine_rotation_angle(self, landmarks):\n lp = landmarks['left-eye-center-pos']\n rp = landmarks['right-eye-center-pos']\n return angle_between_points(lp, rp)", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def test_orientation_vector():\n\topening_angle = geom_instance.source_opening_angle\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\tassert test_orientation[0] < np.cos(opening_angle)\n\tassert test_orientation[1] < np.sin(opening_angle)", "def estimate_rotation(bounding_box):\n # x,y coord of topleft corner\n x,y,w,h = bounding_box\n rotation_arg = np.abs(1 - (h/float(w)))*2\n return rad_to_deg( np.arctan(rotation_arg) )", "def rotate(self):\n pass", "def doRotation(self, delta):\n self.correctPending()\n self.rotation = (self.rotation + delta) % self.possibleRotations", "def angle(self) -> int:", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ 
np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def angle(z):", "def relativeRotation(self):\n return self.rotation()", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def rotation(self):\n return self.transform.getRotation() + [0]", "def euler_angle_to_rotation(ea, convention='zyx'):\n axis_names_to_vectors = dict([('x', (1, 0, 0)), ('y', (0, 1, 0)), ('z', (0, 0, 1))])\n axis0, axis1, axis2 = convention\n R0 = so3.rotation(axis_names_to_vectors[axis0], ea[0])\n R1 = so3.rotation(axis_names_to_vectors[axis1], ea[1])\n R2 = so3.rotation(axis_names_to_vectors[axis2], ea[2])\n return so3.mul(R0, so3.mul(R1, R2))", "def get_rot_dt(self) -> WAQuaternion:\n pass", "def rotation(self):\n\n return self._rotation", "def rotates(self, maze, game_display):\n if self.lidars[0].get_sense() <= self.lidars[0].radius // 3:\n if uniform(0, 1) > 0.7:\n self.rotate_right(angle=45, maze=maze, game_display=game_display)\n else:\n self.rotate_left(angle=45, maze=maze, game_display=game_display)\n # fix to left.\n if self.lidars[1].get_sense() <= 2 * self.lidars[1].radius // 3:\n self.rotate_left(angle=10, maze=maze, game_display=game_display)\n # fix to right.\n if self.lidars[2].get_sense() <= 2 * self.lidars[0].radius // 3:\n self.rotate_right(angle=10, maze=maze, game_display=game_display)", "def rotate90(self):", "def get_RotationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment \n self.rotInPlane = len(TiltSeries_.Projections) * [0.]\n kk = 0\n for Proj in TiltSeries_.Projections:\n self.rotInPlane[kk] = Proj.rotInPlane\n kk = kk + 1\n return self.rotInPlane", "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def test_calc_basis_rotation_matrix(time_location, moon_time_location, telescope_frame):\n\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, 
telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n skycoord=SkyCoord(\n Longitude(12.0 * units.hr), Latitude(-30.0 * units.deg), frame=\"icrs\"\n ),\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n basis_rot_matrix = source._calc_average_rotation_matrix()\n\n assert np.allclose(np.matmul(basis_rot_matrix, basis_rot_matrix.T), np.eye(3))\n assert np.allclose(np.matmul(basis_rot_matrix.T, basis_rot_matrix), np.eye(3))", "def determine_rotation(arm, d, tip_data, rot_data):\n n_t = np.zeros(3)\n for this_n_t in tip_data['pos_ntip_wrt_r']:\n n_t += this_n_t\n n_t /= len(tip_data['pos_ntip_wrt_r'])\n print(\"Our n_t to use in this stage: {}\".format(n_t))\n\n K = len(rot_data['pos_ntip_wrt_s'])\n errors_zyz = []\n errors_zyx = []\n\n for k in range(K):\n lhs = rot_data['pos_ntip_wrt_s'][k]\n t_st = rot_data['pos_tool_wrt_s_code'][k]\n ypr = rot_data['rot_tool_wrt_s_code'][k]\n yaw, pitch, roll = ypr[0], ypr[1], ypr[2]\n\n # R_zyz\n R_z1 = U.rotation_matrix_3x3_axis(angle=roll, axis='z')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z2 = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyz = R_z2.dot(R_y).dot(R_z1)\n\n # R_zyx\n R_x = U.rotation_matrix_3x3_axis(angle=roll, axis='x')\n R_y = U.rotation_matrix_3x3_axis(angle=pitch, axis='y')\n R_z = U.rotation_matrix_3x3_axis(angle=yaw, axis='z')\n R_zyx = R_z.dot(R_y).dot(R_x)\n\n # Evaluate!\n rhs_zyz = t_st + R_zyz.dot( n_t )\n rhs_zyx = t_st + R_zyx.dot( n_t )\n err_zyz = np.linalg.norm(lhs - rhs_zyz)\n err_zyx = np.linalg.norm(lhs - rhs_zyx)\n errors_zyz.append( err_zyz )\n errors_zyx.append( err_zyx )\n print(\"\\nerr_zyz: {:.3f} for {}-th sample\".format(err_zyz, k))\n print(\"err_zyx: {:.3f} for {}-th sample\".format(err_zyx, k))\n print(\"R_zyz:\\n{}\".format(R_zyz))\n print(\"R_zyx:\\n{}\".format(R_zyx))\n\n print(\"\\nDone with evaluation!\")\n print(\"zyz has avg error {:.5f}\".format(np.mean(errors_zyz)))\n print(\"zyx has avg error {:.5f}\".format(np.mean(errors_zyx)))", "def deg2rad(a):", "def get_road_rotation(self):\r\n if self.container is not None:\r\n rot = self.container.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2\r\n \r\n rot = self.track.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2", "def test_y_rot(self):\n\n # Create a Matrix representing 90 deg y rot.\n mat = Matrix44.from_rot_y(90)\n # Use from_matrix44()\n quat = Quat.from_matrix44(mat)\n\n # Ensure the quat matches a 90 degree x rotation.\n expected = Quat.from_axis_angle_deg(Vec3(0, 1, 0), 90)\n AssertQuatAlmostEqual(quat, expected, self)", "def rotation(self, p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def test_rotate_down_counter(self):\n # Testing 'down' rotation counter-clockwise\n side = 'Dr'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['g', 'g']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['o', 'o']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['b', 'b']], dtype='<U1'),\n np.array([['r', 'r'], ['r', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def get_rotationalAngularPosition(self, t): # returns [rad]\n angle = self.theta0 + self.rotationalAngularVelocity * t # angular position [rad]\n return angle", "def rot180(a):\n return 
rot90(a, 2)", "def test_cylindrical(self):\n # Rotate around the z axis\n r = Joint.cylindrical(np.array([0, 0, 1]))\n t_mat = r(np.array([np.pi / 2, 1.0]))\n\n rot_vec = np.dot(t_mat[:3, :3], np.array([1, 0, 0]))\n\n self.assertTrue(np.allclose(\n rot_vec, np.array([0, 1, 0]), rtol=1e-5, atol=1e-5))\n self.assertTrue(np.allclose(t_mat[2, 3], 1))", "def test_rotate_and_move_right(controller):\n pos, angle = controller.odometry(11, 10, Vector2(0, 0), 0)\n assert pos.x > 0 # Moved forward.\n assert pos.y < 0 # Went a bit down.\n assert angle < 0 # Turned right.", "def coord_rotate_rad(x, y, z):\n #-- 1 --\n xt = math.asin ( math.sin(x) * math.sin(y) +\n math.cos(x) * math.cos(y) * math.cos(z) )\n #-- 2 --\n yt = math.acos ( ( math.sin(x) - math.sin(y) * math.sin(xt) ) /\n ( math.cos(y) * math.cos(xt) ) )\n #-- 3 --\n if math.sin(z) > 0.0:\n yt = TWO_PI - yt\n\n #-- 4 --\n return (xt, yt)", "def angle(self) -> float:\n ...", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def evaluate_rotation(self, box, instance):\n prediction = Box.Box(box)\n annotation = Box.Box(instance)\n gt_rotation_inverse = np.linalg.inv(annotation.rotation)\n rotation_error = np.matmul(prediction.rotation, gt_rotation_inverse)\n\n error_angles = np.array(\n rotation_util.from_dcm(rotation_error).as_euler('zxy'))\n abs_error_angles = np.absolute(error_angles)\n abs_error_angles = np.minimum(\n abs_error_angles, np.absolute(math.pi * np.ones(3) - abs_error_angles))\n error = np.linalg.norm(abs_error_angles)\n\n # Compute the error as the angle between the two rotation\n rotation_error_trace = abs(np.matrix.trace(rotation_error))\n angular_distance = math.acos((rotation_error_trace - 1.) 
/ 2.)\n\n # angle = 2 * acos(|q1.q2|)\n box_quat = np.array(rotation_util.from_dcm(prediction.rotation).as_quat())\n gt_quat = np.array(rotation_util.from_dcm(annotation.rotation).as_quat())\n quat_distance = 2 * math.acos(np.dot(box_quat, gt_quat))\n\n # The rotation measure from \"3D Bounding box estimation using deep learning\n # and geometry\"\n rotation_error_log = scipy.linalg.logm(rotation_error)\n rotation_error_frob_norm = np.linalg.norm(rotation_error_log, ord='fro')\n rotation_distance = rotation_error_frob_norm / 1.4142\n\n return (error, quat_distance, angular_distance, rotation_distance)", "def test_asssert_rotation_matrix_behaves_like_check_matrix():\n random_state = np.random.RandomState(2345)\n for _ in range(5):\n a = pr.random_axis_angle(random_state)\n R = pr.matrix_from_axis_angle(a)\n original_value = R[2, 2]\n for error in [0, 1e-8, 1e-7, 1e-5, 1e-4, 1]:\n R[2, 2] = original_value + error\n try:\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)\n except AssertionError:\n assert_raises_regexp(\n ValueError, \"Expected rotation matrix\", pr.check_matrix, R)", "def create_tangent_angles_equal(self):\n\n self.text_mirror = TextMobject(r\"Specular reflection\")\n self.text_mirror.move_to(4.0 * RIGHT + 2.0 * UP)\n\n self.tex_derive_ti_tr = TexMobject(r\"\\theta_{i}\", r\"=\", r\"\\theta_{r}\", r\"=\", r\"\\theta_{0}\")\n self.tex_derive_ti_tr[0].set_color(self.tex_theta_in_color)\n self.tex_derive_ti_tr[2].set_color(self.tex_theta_ref_color)\n self.tex_derive_ti_tr[4].set_color(RED)\n self.tex_derive_ti_tr.move_to(4.0 * RIGHT + 1.0 * UP)\n\n self.tex_derive_tan_tin_tan_tr = TexMobject(r\"90^{\\circ}\", r\"-\", r\"\\theta_{i}\",\n r\"=\",\n r\"90^{\\circ}\", r\"-\", r\"\\theta_{r}\",\n r\"=\", r\"\\theta_{0}'\")\n for i in range(0,3):\n self.tex_derive_tan_tin_tan_tr[ i].set_color(self.tex_theta_in_color)\n self.tex_derive_tan_tin_tan_tr[4+i].set_color(self.tex_theta_ref_color)\n self.tex_derive_tan_tin_tan_tr[8].set_color(RED)\n self.tex_derive_tan_tin_tan_tr.move_to(4.0 * RIGHT + 0.0 * UP)\n\n self.theta_0 = TexMobject(r\"\\theta_{0}\"). 
set_color(RED)\n self.theta_0_d = TexMobject(r\"\\theta_{0}'\").set_color(RED)", "def _rotate(polyreg, i=None, j=None, u=None, v=None, theta=None, R=None):\n # determine the rotation matrix based on inputs\n if R is not None:\n logger.debug(\"rotate: R=\\n{}\".format(R))\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n elif i is not None and j is not None and theta is not None:\n logger.info(\"rotate via indices and angle.\")\n if R is not None:\n raise ValueError(R)\n if u is not None:\n raise ValueError(u)\n if v is not None:\n raise ValueError(v)\n if i == j:\n raise ValueError(\"Must provide two unique basis vectors.\")\n R = givens_rotation_matrix(i, j, theta, polyreg.dim)\n elif u is not None and v is not None:\n logger.info(\"rotate via 2 vectors.\")\n if R is not None:\n raise ValueError(R)\n if i is not None:\n raise ValueError(i)\n if j is not None:\n raise ValueError(j)\n if theta is not None:\n raise ValueError(theta)\n R = solve_rotation_ap(u, v)\n else:\n raise ValueError(\"R or (i and j and theta) or (u and v) \"\n \"must be defined.\")\n if isinstance(polyreg, Polytope):\n # Ensure that half space is normalized before rotation\n n, p = _hessian_normal(polyreg.A, polyreg.b)\n # Rotate the hyperplane normals\n polyreg.A = np.inner(n, R)\n polyreg.b = p\n else:\n # Rotate subregions\n for poly in polyreg.list_poly:\n _rotate(poly, None, None, R=R)\n # transform bbox and cheby\n if polyreg.bbox is not None:\n polyreg.bbox = (np.inner(polyreg.bbox[0].T, R).T,\n np.inner(polyreg.bbox[1].T, R).T)\n if polyreg._chebXc is not None:\n polyreg._chebXc = np.inner(polyreg._chebXc, R)\n return R", "def test_angle():\n # radians\n theta_coord = 45. * coord.degrees\n theta_astro = astropy.coordinates.Angle(pi/4., units.radian)\n\n # degrees\n np.testing.assert_almost_equal(theta_coord.rad, theta_astro.rad, decimal=12)\n np.testing.assert_almost_equal(theta_coord / coord.degrees, theta_astro.degree, decimal=12)\n np.testing.assert_almost_equal(theta_coord / coord.hours, theta_astro.hour, decimal=12)\n np.testing.assert_almost_equal(theta_coord / coord.arcmin, theta_astro.arcminute, decimal=12)\n np.testing.assert_almost_equal(theta_coord / coord.arcsec, theta_astro.arcsec, decimal=12)\n\n # Other constructors\n theta_astro2 = astropy.coordinates.Angle(23.09, units.arcsec)\n theta_coord2 = coord.Angle(23.09, coord.arcsec)\n np.testing.assert_almost_equal(theta_coord2.rad, theta_astro2.rad, decimal=12)\n\n theta_astro3 = astropy.coordinates.Angle(-0.17, unit='rad')\n theta_coord3 = coord._Angle(-0.17)\n np.testing.assert_almost_equal(theta_coord3.rad, theta_astro3.rad, decimal=12)\n\n # astropy wrapping uses a different convention than we do. 
Their argument is\n # the upper end of the target range, not the center.\n theta_astro4 = theta_astro3.wrap_at(360 * units.deg)\n theta_coord4 = theta_coord3.wrap(180 * coord.degrees)\n np.testing.assert_almost_equal(theta_coord4.rad, theta_astro4.rad, decimal=12)\n\n theta_astro5 = theta_astro3.wrap_at(-100 * units.deg)\n theta_coord5 = theta_coord3.wrap(-280 * coord.degrees)\n np.testing.assert_almost_equal(theta_coord5.rad, theta_astro5.rad, decimal=12)\n\n theta_astro6 = astropy.coordinates.Angle('03:34:12', unit='hourangle')\n theta_coord6 = coord.Angle.from_hms('03:34:12')\n np.testing.assert_almost_equal(theta_coord6.rad, theta_astro6.rad, decimal=12)\n\n theta_astro7 = astropy.coordinates.Angle('03:34:12', unit='deg')\n theta_coord7 = coord.Angle.from_dms('03:34:12')\n np.testing.assert_almost_equal(theta_coord7.rad, theta_astro7.rad, decimal=12)\n\n # Their default arguments to to_string are different from ours, but can make them compatible.\n print('theta_astro6.hms = ',theta_astro6.to_string(sep=':', pad=True))\n print('theta_coord6.hms = ',theta_coord6.hms())\n assert theta_coord6.hms() == theta_astro6.to_string(sep=':', pad=True)\n\n print('theta_astro7.dms = ',theta_astro7.to_string(sep=':', pad=True))\n print('theta_coord7.dms = ',theta_coord7.dms())\n assert theta_coord7.dms() == theta_astro7.to_string(sep=':', pad=True)\n\n print('theta_astro6.hms = ',theta_astro6.to_string())\n print('theta_coord6.hms = ',theta_coord6.hms(sep='hms', pad=False))\n assert theta_coord6.hms(sep='hms', pad=False) == theta_astro6.to_string()\n\n print('theta_astro7.hms = ',theta_astro7.to_string())\n print('theta_coord7.hms = ',theta_coord7.dms(sep='dms', pad=False))\n assert theta_coord7.dms(sep='dms', pad=False) == theta_astro7.to_string()", "def lookup_rotation(source_frame, target_frame, tf_listener = None):\n\n # Check the tf_listener and create new one if None\n if tf_listener is None:\n tf_listener = tf.TransformListener()\n\n # Get the transforamtion from baselink to frame\n (trans,rot) = tf_listener.lookupTransform(source_frame, target_frame, rospy.Time(0))\n\n # Compute dot product\n d = sum([a * b for (a,b) in zip([0,-1],trans)])\n d = d / math.sqrt(sum([a ** 2 for a in trans[0:2]]))\n\n return math.acos(d)", "def carla_rotation_to_RPY(carla_rotation):\n roll = -math.radians(carla_rotation.roll)\n pitch = -math.radians(carla_rotation.pitch)\n yaw = -math.radians(carla_rotation.yaw)\n\n return (roll, pitch, yaw)", "def test_xform_rotation_fail(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.xform_file, ani=1)\n\n values = cmds.keyframe('pCube1.rx', q=1, vc=1)\n self.assertNotAlmostEqual(0.0, values[-1])", "def test_pose(self):\n t = self.t\n \n # Cyclic functions for orientation and position values\n delta = math.sin(t) * 1000\n alpha = math.cos(t) * math.pi * 2\n \n # Default values\n x = 0\n y = 0\n z = 0\n\n pitch = 0\n yaw = 0\n roll = 0\n \n # assign values cyclically\n if t % (math.pi * 12) < math.pi * 2:\n x = delta\n elif t % (math.pi * 12) < math.pi * 4:\n y = delta\n elif t % (math.pi * 12) < math.pi * 6:\n z = delta\n elif t % (math.pi * 12) < math.pi * 8:\n pitch = alpha\n elif t % (math.pi * 12) < math.pi * 10:\n yaw = alpha\n elif t % (math.pi * 12) < math.pi * 12:\n roll = alpha\n else:\n # Reset counter\n self.t = 0.0\n \n return ((x, y, z), (pitch, yaw, roll))", "def rotate_shape(shape, xy_center, angle_degrees):" ]
[ "0.72649866", "0.71997005", "0.7070056", "0.6858857", "0.67564845", "0.6559771", "0.65583205", "0.6543984", "0.65229213", "0.6519182", "0.64923644", "0.64011544", "0.63578784", "0.632081", "0.62979436", "0.62914723", "0.62909883", "0.62908113", "0.6243588", "0.62338036", "0.6228936", "0.6217549", "0.6204204", "0.6168009", "0.61678576", "0.6160663", "0.6157259", "0.61556774", "0.6139853", "0.61366385", "0.61277026", "0.61219805", "0.61167705", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6114191", "0.6112563", "0.6100067", "0.609062", "0.60854995", "0.60633385", "0.6059793", "0.60384166", "0.60381716", "0.60328305", "0.6028001", "0.6023711", "0.6011692", "0.6011015", "0.5998568", "0.5996819", "0.59844375", "0.5982559", "0.5951389", "0.5948605", "0.5936716", "0.592567", "0.5924005", "0.5920572", "0.59100574", "0.5898042", "0.5887418", "0.58868587", "0.5885562", "0.5881101", "0.5876074", "0.5870109", "0.58668214", "0.5866264", "0.5865761", "0.58573127", "0.58510345", "0.5840672", "0.5833966", "0.5823133", "0.5816574", "0.5805825", "0.5791181", "0.579013", "0.57879573", "0.5781197", "0.5774318", "0.57715034", "0.57714146", "0.57622486", "0.57594967", "0.57426834", "0.57422715", "0.57362354", "0.57353854", "0.57333237", "0.5731235" ]
0.91761756
0
Test the AioBaseTurtle._calc_circle function
def test_calc_circle(self):
    t = AioBaseTurtle()
    steps, step_len, rot_step = t._calc_circle(100, extent=180)
    self.assertEqual(steps, 14)
    self.assertAlmostEqual(rot_step, 180.0 / 14.0)
    self.assertAlmostEqual(step_len, 22.3928952207)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)", "def GetCircle(circle):\r\n pass", "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)", "def test_circumference():\n assert func_difficult.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func_difficult.circumference_circle(0) == 0, \"is 0\"\n assert func_difficult.circumference_circle(10) == 2 * np.pi * 10", "def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)", "def test_circumference():\n assert func1.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func1.circumference_circle(0) == 0, \"is 0\"\n assert func1.circumference_circle(10) == 2 * np.pi * 10", "def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )", "def area_of_circle(radius = radious):\n area = radius * radious * 3.142\n print(\"Calculating area...\")\n time.sleep(2)\n return area", "def test_generate_circle(self):\n\n # Generate a circle around Sydney airport with radius 3km\n radius = 3000\n C = self.Syd.generate_circle(radius)\n\n # Check distance around the circle\n # Note that not every point will be exactly 3000m\n # because the circle in defined in geographic coordinates\n for c in C:\n p = Point(c[1], c[0])\n d = self.Syd.distance_to(p)\n msg = ('Radius %f not with in expected tolerance. 
Expected %d'\n % (d, radius))\n assert numpy.allclose(d, radius, rtol=2.0e-1), msg\n\n # Store and view\n #from safe.storage.vector import Vector\n #Vector(geometry=[C],\n # geometry_type='polygon').write_to_file('circle.shp')\n #Vector(geometry=C,\n # geometry_type='point').write_to_file('circle_as_points.shp')\n #Vector(geometry=[[self.Syd.longitude, self.Syd.latitude]],\n # geometry_type='point',\n # data=None).write_to_file('center.shp')", "def area_of_circle(radius):\n return radius", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius", "def get_radius(self):", "def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)", "def circle(center, radius, *args, **kwargs):\n return patch.Circle(center, radius, *args, **kwargs)", "def objects_radius(self, centre, radius):", "def value_circle(self):\r\n return self.circle", "def circle_circumference(a):\n return (2*a*math.pi)", "def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)", "def fit_circle_func():\n pass", "def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)", "def draw_circle(self, center, radius, line_width, line_color, fill_color=\"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n SToval.oval(self.canvas, center, radius, line_width, line_color, fill_color)", "def test_circle_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1),\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle3.png')", "def circle_radius(a, b, c):\n # the sides cannot be negative\n if a < 0 or b < 0 or c < 0:\n return None\n else:\n # semi-perimeter of the circle\n p = (a + b + c) / 2\n\n # area of the traingle\n area = sqrt(p * (p - a) *\n (p - b) * (p - c))\n # Radius of the incircle\n radius = area / p\n # Return the radius\n return radius", "def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5", "def iscircle(a):\n if isarc(a):\n start=a[1][1] \n end=a[1][2]\n ## these are special, 
integer values that flag a true full\n ## circle.\n if start==0 and end==360:\n return True\n else:\n return False", "def remote_concentric_circles(circle_turtle,dis_range,radius):\r\n for i in range(dis_range):\r\n color = random.choice(dark_colors)\r\n circle_turtle.color(color)\r\n circle_turtle.circle(radius*i)\r\n circle_turtle.up()\r\n circle_turtle.sety((radius*i)*(-1))\r\n circle_turtle.down()\r\n\r\n circle_turtle.up()\r\n circle_turtle.goto(0,0)\r\n circle_turtle.down()", "def inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = ##\n\n count = ##\n\n return count", "def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))", "def testRadial(self):\n self.doTest(afwGeom.makeRadialTransform([0, 1.01, 1e-7]))", "def testRadial(self):\n self.doTest(afwGeom.makeRadialTransform([0, 1.01, 1e-7]))", "def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)", "def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')", "def check_circle(self) -> list or None:\n x_text = self.ui.lineEditCX.text()\n xc = float(x_text) if checker.check_float(x_text) else None\n y_text = self.ui.lineEditCY.text()\n yc = float(y_text) if checker.check_float(y_text) else None\n radius_text = self.ui.lineEditRad.text()\n radius = (float(radius_text) if checker.check_float(radius_text) and\n float(radius_text) > 0 else None)\n if xc is None or yc is None or radius is None:\n return None\n else:\n return [xc, yc, radius]", "def circle_area(radius : number) -> number:\n area = pi*radius*radius\n #print(\"The area of circle is =\", area, \"sq.units\")\n return area", "def circle(self, radius, extent=None, steps=None):\n super().circle(radius, extent, steps)", "def circleArea(radius):\n return math.pi * radius * radius", "def get_radius(self):\r\n return 1", "def GetCircleMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertCurve3dToBezier_GetCircleMode(self, *args)", "def test_circle_winding(setup):\n I, a, r0, r_c = setup\n Bz_analytic = mu_0*I/(2*a)\n \n B_calc = generic_curve.biot_savart_integral(r0, r_c, integration_dim='phi',\n spatial_dim='s', I=I)\n np.testing.assert_allclose(B_calc.sel(s=['x', 'y']), [0,0])\n np.testing.assert_allclose(B_calc.sel(s='z'), Bz_analytic)", "def area_circle(r):\n return (r ** 2) * math.pi", "def circle(self, center_x, center_y, radius, color):\n x = radius - 1\n y = 0\n d_x = 1\n d_y = 1\n err = d_x - (radius << 1)\n while x >= y:\n self.pixel(center_x + x, center_y + y, color)\n self.pixel(center_x + y, center_y + x, color)\n self.pixel(center_x - y, center_y + x, color)\n self.pixel(center_x - x, center_y + y, color)\n self.pixel(center_x - x, center_y - y, color)\n self.pixel(center_x - y, center_y - x, color)\n self.pixel(center_x + y, center_y - x, color)\n self.pixel(center_x + x, center_y - y, color)\n if err <= 0:\n y += 1\n err += d_y\n d_y += 2\n if err > 0:\n x -= 1\n d_x += 2\n err += d_x - (radius << 1)", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def change_circle_handler():\n global radius\n radius 
= size", "def circle_area(circle):\n return pi * circle.radius * circle.radius", "def circle(x2, y2, N=100):\n\n # Circle radius\n r = (x2**2 + y2**2)/2/x2\n\n def f(x):\n return np.sqrt(2*r*x - x**2)\n def fp(x):\n return (r-x)/f(x)\n\n x = np.linspace(0, x2, N)\n y = f(x)\n\n # Calcualte the time of travel by numerical integration.\n T = quad(func, 0, x2, args=(f, fp))[0]\n print('T(circle) = {:.3f}'.format(T))\n return x, y, T", "def area_circle(radius):\n \n pi = 3.1459\n area = pi * radius * radius\n return area", "def SetCircleMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertCurve3dToBezier_SetCircleMode(self, *args)", "def point_inside_circle(x,y,center_x,center_y,radius):\n return (x-center_x)**2 + (y - center_y)**2 < radius**2", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def area_of_circle(r):\n a = r**2 * math.pi\n return a", "def circleInfo(r):\n c = 2 * 3.14159 * r\n a = 3.14159 * r * r\n return (c, a)", "def incircle(self, a, b, c):\n m11, m12 = a.x - self.x, a.y - self.y\n m13 = m11 * m11 + m12 * m12\n m21, m22 = b.x - self.x, b.y - self.y\n m23 = m21 * m21 + m22 * m22\n m31, m32 = c.x - self.x, c.y - self.y\n m33 = m31 * m31 + m32 * m32\n det1 = m11 * (m22 * m33 - m23 * m32)\n det2 = m12 * (m21 * m33 - m23 * m31)\n det3 = m13 * (m21 * m32 - m22 * m31)\n return near(det1 - det2 + det3, 0)", "def testRadial(self):\n radialClass = xyTransformRegistry[\"radial\"]\n radialConfig = radialClass.ConfigClass()\n radialConfig.coeffs = (0, 1.05, 0.1)\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(radialClass, radialConfig, filePath)\n radial = radialClass(radialConfig)\n self.assertEqual(type(radial), RadialXYTransform)\n self.assertEqual(len(radial.getCoeffs()), len(radialConfig.coeffs))\n for coeff, predCoeff in zip(radial.getCoeffs(), radialConfig.coeffs):\n self.assertAlmostEqual(coeff, predCoeff)\n self.checkBasics(radial)\n for fromPoint in self.fromIter():\n fromRadius = math.hypot(fromPoint[0], fromPoint[1])\n fromAngle = math.atan2(fromPoint[1], fromPoint[0])\n predToRadius = fromRadius * \\\n (radialConfig.coeffs[2] *\n fromRadius + radialConfig.coeffs[1])\n predToPoint = Point2D(\n predToRadius * math.cos(fromAngle),\n predToRadius * math.sin(fromAngle))\n toPoint = radial.forwardTransform(fromPoint)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])", "def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1", "def compute_area(radius):\n radius = int(input(\"What is the radius of the circle? \\n> \"))\n \n while radius <=0:\n radius = int(input(\"Sorry, must give a number greater than 0. 
\\n> \"))\n \n area = (pi * pow(radius, 2))\n \n #t.circle(radius)\n \n return area", "def test_circular_scatter():\n area = [0, 1000, 0, 1000]\n size = 1000\n x, y = gridder.circular_scatter(area, size, random=False)\n distances = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n npt.assert_allclose(distances, distances[0]*np.ones(size-1), rtol=1e-09)", "def plot_tpc_circle(radius):\n import holoviews as hv\n x = radius * np.cos(np.arange(-np.pi, np.pi + 0.1, 0.01))\n y = radius * np.sin(np.arange(-np.pi, np.pi + 0.1, 0.01))\n return hv.Curve((x, y)).opts(color='k')", "def get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n #r = norm(center - a)\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r", "def in_circle(x0, y0, x, y, r):\n return ((x - x0) ** 2 + (y - y0) ** 2) <= (r ** 2)", "def main():\r\n x = int(input(\"Enter the x coordinate of the center point: \"))\r\n y = int(input(\"Enter the y coordinate of the center point: \"))\r\n radius = int(input(\"Enter the radius: \"))\r\n drawCircle(Turtle(), x, y, radius)\r\n sleep(5)", "def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)", "def oncircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n # This beats normalizing incircle for all sizes, even though that\n # should be the superior algorithm for compiled code.\n theta = 2.*pi * random(size + (1,))\n return concatenate((cos(theta), sin(theta)), axis=-1)", "def circle(self):\n return circle(self.N, self.o, self.r)", "def circle(self, xo: int, yo: int, radius: int, color: int, fill=False):\n for x in range(xo - radius, xo + radius + 1):\n square = sqrt(radius ** 2 - (x - xo) ** 2)\n y = yo + square\n self.pixel(x, floor(y), color)\n y = yo - square\n self.pixel(x, floor(y), color)\n for y in range(yo - radius, yo + radius + 1):\n square = sqrt(radius ** 2 - (y - yo) ** 2)\n x = xo + square\n self.pixel(floor(x), y, color)\n x = xo - square\n self.pixel(floor(x), y, color)\n if fill:\n if radius > 1:\n self.circle(xo, yo, radius - 1, color, True)\n else:\n self.circle(xo, yo, radius - 1, color, False)", "def circle(\n network,\n pore_diameter='pore.diameter',\n):\n return _pi/4 * network[pore_diameter]**2", "def circle_area(radius):\n return math.pi * radius ** 2", "def generate_circle(R,center,N=100,t0=0.0,t1=2.0*np.pi):\r\n theta = np.linspace(t0,t0+t1,N)\r\n y = R*np.sin(theta) + center[1]\r\n x = R*np.cos(theta) + center[0]\r\n return x,y", "def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)", "def circle(self,image,radius,i,j,c_x,c_y):\r\n major_axis=radius\r\n minor_axis=radius\r\n self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)", "def fillcircle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates, must never reverse\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw, same color for outline and fill\n draw.ellipse(rect, color, color)", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle 
must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)", "def _get_radial(self):\n return self.startRadius is not None and self.endRadius is not None", "def drawCircle(x,y,radius,ucoords=1):\n if ucoords:\n dislin.rlcirc(x,y,radius)\n else:\n dislin.circle(x,y,radius)", "def test_get_diameter():\n radius = 10\n c = Circle(radius)\n expected_diameter = 20 \n assert expected_diameter == c.diameter", "def circleArea(radius):\n radius = float(radius)\n return math.pi*(radius**2)", "def circle_area(radius):\n area = radius ** 2 * math.pi\n return area", "def shape_type(self):\n return \"circle\"", "def circular_movement(radius = 150, theta=None):\n y = radius * np.sin(theta)\n if theta == 0:\n x = radius\n elif np.pi*0.99 < theta < np.pi*1.01:\n x = -radius\n else:\n x = y/np.tan(theta)\n return x, y", "def add_circle(self, r_center, c_center, radius, color=BLUE, image=np.full((640, 480, 3), BLACK)):\n circle = np.fromfunction(lambda r, c, _: (r - r_center) ** 2 + (c - c_center) ** 2 <= radius ** 2, image.shape)\n return np.where(circle, color, image)", "def ball_intersection_check(r0, step, radius): \n t = -(step[0]*r0[0] + step[1]*r0[1] + step[2]*r0[2]) + math.sqrt((step[0]*r0[0] + step[1]*r0[1] + step[2]*r0[2])**2 \\\n - (r0[0]**2+r0[1]**2+r0[2]**2) + radius**2)\n return t", "def solve_circle(radius):\n\n if type(radius) is int:\n diameter = 2 * radius\n circumference = 2 * pi * radius\n area = pi * radius * radius\n\n answer = {\"diameter\": diameter, \"circumference\": circumference, \"area\": area}\n return answer\n else:\n return \"NAN\"", "def circle(r=0):\n\tteta = 2*pi*random()\n\tx = (r+1)*cos(teta) + L//2\n\ty = (r+1)*sin(teta) + L//2\n\t\n\ti = int(x) + 1\n\tj = int(y) + 1\n\tprint(r)\n\treturn i,j", "def circle(radius = 10, angle_resolution = 2.5, layer = 0):\n D = Device(name = 'circle')\n t = np.linspace(0, 360, int(np.ceil(360/angle_resolution) + 1)) * pi/180\n xpts = (radius*cos(t)).tolist()\n ypts = (radius*sin(t)).tolist()\n D.add_polygon(points = (xpts, ypts), layer = layer)\n return D", "def test_mul():\n circle = Circle(4)\n expected = circle * 3 \n assert expected.radius == Circle(12).radius", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def circle(self, radius, extent=360):\n temp = self.bearing\n self.b_change = 0;\n tempSpeed = self.speedVar\n self.speedVar = 1\n\n for i in range(0, (extent//2)):\n n = math.fabs(math.radians(self.b_change) * radius)\n if(radius >= 0):\n self.forward(n)\n self.left(2)\n else:\n self.forward(n)\n self.right(2)\n if(radius >= 0):\n self.bearing = (temp + extent)\n else:\n self.bearing = (temp - extent)\n self.speedVar = tempSpeed", "def area_circle(radius: float) -> float:\r\n if radius < 0:\r\n raise ValueError(\"area_circle() only accepts non-negative values\")\r\n return pi * radius**2", "def Circle(self, center=(0.,0.), radius=1., nrad=16, ncirc=40,\n element_type=\"tri\", refinement=False, refinement_level=2, algorithm=\"standard\"):\n\n if not isinstance(center,tuple):\n 
raise ValueError(\"The center of the circle should be given in a tuple with two elements (x,y)\")\n\n self.__reset__()\n\n if algorithm == \"midpoint_subdivision\":\n from Florence.MeshGeneration.CustomMesher import SubdivisionCircle\n mesh = SubdivisionCircle(center=center, radius=radius, nrad=nrad, ncirc=ncirc,\n element_type=element_type, refinement=refinement, refinement_level=refinement_level)\n self.__update__(mesh)\n return\n\n if refinement:\n ndivider = refinement_level\n if nrad==1: nrad=2\n else:\n ndivider = 1\n\n ncirc = int(ncirc/ndivider)\n nrad = int(nrad/ndivider)\n\n\n if ncirc % 8 != 0 or ncirc < 8:\n ncirc = (ncirc // 8)*8 + 8\n\n radii = radius\n\n radius = np.linspace(0,radii,nrad+1)[1:]\n t = np.linspace(0,2*np.pi,ncirc+1)\n x = radius[0]*np.sin(t)[::-1][:-1]\n y = radius[0]*np.cos(t)[::-1][:-1]\n\n points = np.zeros((ncirc+1,2),dtype=np.float64)\n points[0,:] = [0.,0.]\n points[1:,:] = np.array([x,y]).T\n\n\n self.elements = np.zeros((ncirc // 2,4),dtype=np.int64)\n aranger = np.arange(ncirc // 2)\n self.elements[:,1] = 2*aranger + 1\n self.elements[:,2] = 2*aranger + 2\n self.elements[:,3] = 2*aranger + 3\n self.elements[-1,-1] = 1\n\n for i in range(1,nrad):\n t = np.linspace(0,2*np.pi,ncirc+1);\n x = radius[i]*np.sin(t)[::-1][:-1];\n y = radius[i]*np.cos(t)[::-1][:-1];\n points = np.vstack((points,np.array([x,y]).T))\n\n points[:,0] += center[0]\n points[:,1] += center[1]\n\n elements = np.zeros((ncirc,4),dtype=np.int64)\n for i in range(1,nrad):\n aranger = np.arange(1+ncirc*(i-1),ncirc*i+1)\n elements[:,0] = aranger\n elements[:,1] = aranger + ncirc\n elements[:,2] = np.append((aranger + 1 + ncirc)[:-1],i*ncirc+1)\n elements[:,3] = np.append((aranger + 1)[:-1],1+(i-1)*ncirc)\n\n self.elements = np.concatenate((self.elements,elements),axis=0)\n\n makezero(points)\n self.points = points\n self.elements[:ncirc // 2,:] = self.elements[:ncirc // 2, [1,2,3,0]]\n\n self.element_type = \"quad\"\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n self.GetBoundaryEdges()\n\n if refinement:\n mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=ndivider)\n for i in range(1,self.nelem):\n mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=ndivider)\n self.__update__(mesh)\n\n # SECOND LEVEL OF REFINEMENT IF NEEDED\n # mesh = self.QuadrilateralProjection(points=self.points[self.elements[0,:],:], npoints=2)\n # for i in range(1,self.nelem):\n # mesh += self.QuadrilateralProjection(points=self.points[self.elements[i,:],:], npoints=2)\n # self.__update__(mesh)\n\n if element_type == \"tri\":\n sys.stdout = open(os.devnull, \"w\")\n self.ConvertQuadsToTris()\n sys.stdout = sys.__stdout__", "def circle(self, clear_screen=True, x=50, y=50, radius=40, fill_color='black', outline_color='black'):\n\n if clear_screen:\n self.clear()\n\n x1 = x - radius\n y1 = y - radius\n x2 = x + radius\n y2 = y + radius\n\n return self.draw.ellipse((x1, y1, x2, y2), fill=fill_color, outline=outline_color)", "def __init__(self, c, radius, a0, da):\n Circle.__init__(self, Vector(c).to_2d(), radius)\n self.line = None\n self.a0 = a0\n self.da = da", "def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)", "def test_reflected_numerics():\n circle = Circle(2)\n assert circle * 3 == 3 * circle", "def test_arc_draw1():\n with TestingCanvas():\n ellipse = 
visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n start_angle=90., span_angle=120.,\n color=(0, 0, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/arc1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0., 0.), radius=(0.4, 0.3),\n start_angle=90., span_angle=120.,\n border_color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/arc2.png')", "def make_circle(x, y, r):\n\tnew_circle = Circle()\n\tnew_circle.x = x\n\tnew_circle.y = y\n\tnew_circle.r = r\n\treturn new_circle" ]
[ "0.734142", "0.734142", "0.72636837", "0.703413", "0.6813236", "0.6676832", "0.6654256", "0.66349846", "0.6573545", "0.65500736", "0.6521824", "0.6442423", "0.64218247", "0.6412573", "0.63668287", "0.63393223", "0.6279476", "0.6262953", "0.6237789", "0.6233661", "0.6233039", "0.6232965", "0.6225517", "0.6213921", "0.62127304", "0.61972475", "0.6193501", "0.61925143", "0.6183752", "0.6175291", "0.6173266", "0.61600983", "0.614445", "0.614445", "0.61366284", "0.6134544", "0.61160195", "0.611006", "0.6097189", "0.60877115", "0.60823464", "0.6082162", "0.6069828", "0.60694325", "0.6046869", "0.6032674", "0.6025991", "0.6017265", "0.60162055", "0.60101354", "0.59977645", "0.5994711", "0.5990104", "0.5989322", "0.5989322", "0.59722924", "0.5967401", "0.5967383", "0.59647423", "0.59614193", "0.59601665", "0.5955688", "0.5937065", "0.5933318", "0.5926042", "0.59258527", "0.59131515", "0.5912359", "0.59099823", "0.590519", "0.5898997", "0.58830255", "0.5878987", "0.58764875", "0.58723015", "0.58716846", "0.5870387", "0.58673245", "0.58588207", "0.58505774", "0.58490247", "0.58489794", "0.5840939", "0.5824458", "0.58211386", "0.5819442", "0.58172566", "0.58039176", "0.5802872", "0.5785152", "0.5780564", "0.57741714", "0.57681274", "0.5756111", "0.57457566", "0.5744945", "0.5743397", "0.5737195", "0.5725319", "0.57244253" ]
0.8547868
0
Test the AioBaseTurtle._move_step function
def test_move_step(self): t = AioBaseTurtle() t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5)) self.assertAlmostEqual(t._position[0], 100) self.assertAlmostEqual(t._position[1], 100) t.screen._drawline.assert_called_once_with( t.currentLineItem, ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position "black", 1, False ) self.mock_update.assert_called_once_with()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def step(self, move):", "def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)", "def move(self, direction, step):\n for i in range(1, step + 1):\n y, x = self.robot_position\n if direction == \"N\" and y > 0:\n if self.carte[y - 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y - 1, x)\n elif direction == \"S\" and y <= self.height:\n if self.carte[y + 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y + 1, x)\n elif direction == \"E\" and x <= self.width+1:\n if self.carte[y][x + 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x + 1)\n elif direction == \"O\" and x > 0:\n if self.carte[y][x - 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x - 1)\n\n if self.robot_position == self.out_position:\n print(\"Bravo vous avez fini\")\n return True\n\n return False", "def move(self, step):\n\n status = self.read()\n Logger.getLogger().debug(\"Status in move method: %s\", status)\n # while the motors are moving we don't want to start another movement\n if status > CurtainsStatus.OPEN or self.motor.value:\n return\n\n self.target = step\n\n # deciding the movement direction\n if self.steps() < self.target:\n self.__open__()\n elif self.steps() > self.target:\n self.__close__()", "def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed", "def _step(self) -> None:", "def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)", "def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1", "def move():\n Robot.move()", "def move(self, timestep):\n if self.trajectoryStep >= len(self.trajectory):\n # return trajectory completed\n return False\n\n target2DPosition = self.trajectory[self.trajectoryStep]\n vector = [-target2DPosition[0] - self.translation[0],\n -target2DPosition[1] - self.translation[1],\n 0.0]\n distance = math.sqrt(vector[0] * vector[0] + vector[1] * vector[1] +\n vector[2] * vector[2])\n maxStep = MovingTarget.SPEED * timestep\n\n if distance < maxStep:\n self.trajectoryStep += 1\n self.translation = [a + b for a, b in zip(self.translation, vector)]\n segmentChanged = True\n else:\n if math.isinf(self.rotationStep):\n self.rotationStepsCount = 10\n newAngle = math.acos(dotProduct([1.0, 0.0, 0.0], vector))\n if vector[1] < 0.01:\n newAngle = -newAngle\n diff = self.rotationAngle - newAngle\n while diff > math.pi:\n diff -= 2 * math.pi\n while diff < -math.pi:\n diff += 2 * math.pi\n 
self.rotationStep = -diff / self.rotationStepsCount\n\n factor = maxStep / distance\n self.translation[0] += vector[0] * factor\n self.translation[1] += vector[1] * factor\n segmentChanged = False\n\n self.translationField.setSFVec3f(self.translation)\n\n if self.rotationStepsCount > 0:\n if segmentChanged:\n self.rotationAngle += self.rotationStep * \\\n self.rotationStepsCount\n self.rotationStepsCount = 0\n else:\n self.rotationAngle += self.rotationStep\n self.rotationStepsCount -= 1\n self.rotationField.setSFRotation([0.0, 0.0, 1.0,\n self.rotationAngle])\n\n if segmentChanged:\n self.rotationStep = float('Inf')\n return True", "def move(self, *step):\n self.x += step[0]\n self.y += step[1]", "def move(self, t, s):\n raise NotImplementedError", "def do_step(self) -> None:", "def test_move_default_dropped_steps(self):\n player = ss.LazyPlayer()\n random.seed(2)\n player.move()\n random.seed(5)\n player.move()\n assert player.position == 44", "def move_time(self, step_before, step_after):\n raise NotImplementedError", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80", "def move(self,dt):\n raise NotImplementedError(\"Robot.move\")", "def move(self, move):\n raise NotImplementedError()", "def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True", "def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, -1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}", "def test_move_dropped_steps_greater_than_move(self):\n player = ss.LazyPlayer(dropped_steps=3)\n random.seed(2)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 40", "def _step(self):\n pass", "def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 
2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}", "def move(self):\n pass", "def step(self, state):", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def movement(self):", "def move_turtle(self):\n self.forward(self.move_speed)", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def test_pass_move(self):\n manager = 
DummyLevelManager()\n game = Game(manager)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.RUNNING)\n self.assertEqual(game.current_level.snake_length, 6)\n self.assertEqual(game.current_level.snake_direction, (0, 1))\n self.assertEqual(game.current_level.snake,\n [(1, 9), (1, 8), (1, 7), (1, 6), (1, 5), (1, 4)])", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def step(self, move):\r\n self.board.push_uci(move)\r\n self.num_halfmoves += 1", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def step(self, step=None):\n pass", "def decide_next_move(self):\n pass", "def do_steps(self, motornum, val):\n #print \"Moving in steps...\"\n steps = abs(val)\n if val < 0:\n direction = 1\n else:\n direction = 2\n mag = steps\n\n self.takesteps(mag=mag, direction=direction, motornum=motornum)\n self.do_azangle()\n self.do_altangle()", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def move(x,y):\r\n pass", "def _move(self, dx, dy):\n pass # must override in subclass", "def __move(self, d: str, step: int):\n\n if d == 'UP':\n if self.num_line - step >= 0:\n self.num_line -= step\n else:\n self.num_line = 0\n if d == 'DOWN':\n if self.num_line + step < len(self.text) - (self.max_y - 2):\n self.num_line += step\n else:\n self.num_line = len(self.text) - (self.max_y - 2)\n if d == 'RIGHT':\n if self.num_char + step < self.maxlen:\n self.num_char += step\n else:\n self.num_char = self.maxlen\n if d == 'LEFT':\n if self.num_char - step > 0:\n self.num_char -= step\n else:\n self.num_char = 0", "def _moveSteps(self, direction, steps: int, speed: int, is_blocking=False):\n print(\"Move command: ({}, {}, {}, {})\".format(direction, speed, steps, is_blocking))\n if direction in Direction.FORWARD.value:\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n\n if direction in Direction.BACKWARD.value:\n self.drive.on_for_rotations(SpeedPercent(-speed), SpeedPercent(-speed), steps, block=is_blocking)\n\n if direction in Direction.LEFT.value:\n self._turn(direction, speed)\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n offset = -1\n self.index = self.new_index(self.index, offset)\n self.pointing = self.direction[self.index]\n\n if direction in Direction.RIGHT.value:\n self._turn(direction, speed)\n self.drive.on_for_rotations(SpeedPercent(speed), SpeedPercent(speed), steps, block=is_blocking)\n offset = 1\n self.index = self.new_index(self.index, offset)\n self.pointing = self.direction[self.index]\n\n if direction in Direction.STOP.value:\n self.drive.off()\n self.patrol_mode = False\n self.enemy_not_detected = False\n print(\"STOP!! 
patrol mode = {} y enemy not detected = {}\".format(self.patrol_mode, self.enemy_not_detected))\n\n if direction in Direction.PAUSE.value:\n self.drive.off()\n print(\"Pause to kill the enemy\")", "def move(self):\n raise NotImplementedError", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def test_turn(self):\n Action = SimObject.Action # shortcut\n actions = [\n Action.MOVE, Action.TURN_RIGHT, Action.MOVE, Action.TURN_LEFT]\n map_lines = [\" \", \" T\"]\n _MapContainer.MAP = \"\\n\".join(map_lines)\n configuration = {\n \"map\": _MapContainer,\n \"parameters\": {(1, 1): ([Direction.NORTH, actions], {})},\n \"steps_limiter_steps\": 4\n }\n sim = RecorderSimulator(configuration, {})\n sim.run()\n self.assertEqual(\n sim.maps,\n [map_lines, [\" \", \" T\"], [\" \", \"T \"], [\" \", \"T \"],\n [\"T \", \" \"]])", "def check4move(st, selected_unit, direction):\n return 1", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def teleop_step(self):\n # get current state\n state = self.panda.state\n self.step_number += 1\n\n return_state = self.return_state()\n\n # read in from keyboard\n key_input = self.key.get_controller_state()\n dpos, dquat, grasp, reset = (\n key_input[\"dpos\"],\n key_input[\"dquat\"],\n key_input[\"grasp\"],\n key_input[\"reset\"],\n )\n action = dpos\n self.close_gripper(state)\n # action[0:3] = dpos\n\n # action in this example is the end-effector velocity\n self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)\n\n # take simulation step\n p.stepSimulation()\n\n # return next_state, reward, done, info\n next_state = self.panda.state\n return_next_state = self.return_state()\n reward, done = self.calculate_reward(next_state, action)\n print(f'step: {self.step_number}\\treward: {reward}\\tdone: {done}')\n if reset:\n done = True\n info = self.panda.state\n\n # self.grasp = grasp\n return return_state, action, reward, return_next_state, done, info", "def test_change_direction(self):\n travelcalculator = TravelCalculator(50, 25)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(60)\n travelcalculator.start_travel(80)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n # change direction after two seconds\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 64\n travelcalculator.start_travel(48)\n assert travelcalculator.travel_direction == 
TravelStatus.DIRECTION_UP\n\n assert travelcalculator.current_position() == 64\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.current_position() == 56\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000006.0\n assert travelcalculator.current_position() == 48\n assert travelcalculator.position_reached()", "def test_move_onto_terrain(self):\n # move onto Water (1 extra)\n b1 = board.Board(self.small_ter)\n start = np.array((0, 3), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 2\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 2)\n\n # move onto Lava (4 extra)\n start = np.array((3, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 0\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertTrue(isvalid)\n self.assertEqual(cost, 5)\n\n # move onto Barrier (illegal)\n start = np.array((1, 4), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 1\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)\n\n # move onto Rock (illegal)\n start = np.array((1, 0), dtype='int')\n k1 = knight.Knight(b1, start)\n # set move choice\n move_choice = 7\n # determine move validity and cost\n (cost, isvalid) = k1.validate_move(move_choice)\n self.assertFalse(isvalid)", "def test_object_move(self):\n self.assertTrue(self.obj1 in self.room1.contents)\n # use move_to hook\n self.obj1.move_to(self.room2)\n self.assertFalse(self.obj1 in self.room1.contents)\n self.assertTrue(self.obj1 in self.room2.contents)\n\n # move back via direct setting of .location\n self.obj1.location = self.room1\n self.assertTrue(self.obj1 in self.room1.contents)\n self.assertFalse(self.obj1 in self.room2.contents)", "def move(): #py:move\n RUR._move_()", "def _step(self, board, elapsedTime):\n\t\tpass", "def move(self, direction):\n pass", "def step(self, action):\n # TODO: code here\n y, x = self.state\n dy, dx = self.moves[action]\n next_x, next_y = x+dx, y+dy\n\n next_x = np.clip(next_x, 0, self.width-1) # clip the values to the world\n next_y = np.clip(next_y, 0, self.height-1) # clip the values to the world\n\n if next_y == 1:\n rand = np.random.uniform()\n if rand < 0.2:\n next_x += 1\n elif rand < 0.7:\n next_x += 2\n else:\n next_x += 3\n\n next_x = np.clip(next_x, 0, self.width - 1)\n\n if next_x == 4 and next_y == 1:\n reward = -1\n done = True\n elif next_x == 4 and next_y == 2:\n reward = 1\n done = True\n else:\n reward = 0\n done = False\n\n next_state = (next_y, next_x)\n self.state = next_state\n\n return next_state, reward, done, {}", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)", "def robot_step(self, location, current_path, direction=None):\n new_location = self.take_step(location, direction)\n if new_location:\n if new_location 
!= location:\n current_path.append(direction)\n if self.is_finish(new_location):\n return [current_path]\n right = self.robot_step(new_location,list(current_path),RIGHT)\n down = self.robot_step(new_location,list(current_path),DOWN)\n if right and down:\n return right + down\n elif right:\n return right\n elif down:\n return down\n else:\n return None\n else:\n return None", "def step(self, obs):\n self.obs = obs\n decision = self.nn_output[0]\n # sigmoid activation function so need to subtract 0.5 to allow movement in all directions\n displacement = [self.nn_output[1] - 0.5, self.nn_output[2] - 0.5]\n\n if self.first_move:\n if self.can_do_action(obs, actions.FUNCTIONS.Move_screen.id):\n self.first_move = False\n self.set_target_destination(self.retrieve_enemy_location(obs))\n return self.move_unit(obs)[\"function\"]\n\n if decision > 0.5: # fight\n if self.can_do_action(obs, actions.FUNCTIONS.Attack_screen.id):\n player_relative = obs.observation.feature_screen.player_relative\n enemy = self.xy_locs(player_relative == _PLAYER_ENEMY)\n if not enemy:\n return actions.FUNCTIONS.no_op()\n\n target = enemy[np.argmax(np.array(enemy)[:, 1])]\n return actions.FUNCTIONS.Attack_screen(\"now\", target)\n else: # flee\n if self.can_do_action(obs, actions.FUNCTIONS.Move_screen.id):\n move = self.move_unit(obs)\n if move[\"status\"] is \"ARRIVED_AT_TARGET\":\n step_size = 10\n self.movement_step(step_size, displacement, obs)\n return actions.FUNCTIONS.Move_screen(\"now\", (self.target[0], self.target[1]))\n else:\n return move[\"function\"]\n\n if self.can_do_action(obs, actions.FUNCTIONS.select_army.id):\n return actions.FUNCTIONS.select_army(\"select\")", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def move_turtle(self, x, y):\n tortuga = self.turtle\n if self.capture_mode:\n tortuga.setheading(tortuga.towards(x, y))\n tortuga.setpos(x, y)\n self.add_punto(Punto(x, y))", "def step(self):\r\n raise NotImplementedError", "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n if self.loop is True:\n ref_ind = ref_ind % traj_len\n else:\n if ref_ind > traj_len-1:\n ref_ind = traj_len-1\n if closest_ind == traj_len-1:\n self.at_dest = True\n else:\n ref_ind = closest_ind\n ref_state = self.np_trajectory[:, int(ref_ind)]\n\n # update vehicle state.\n '''if self.class_name == 'TruckVehicle':\n self.update_vehicle_state_qualisys()\n self.UDP_receive()\n if self.data == \"-1.00\":\n self.set_control_commands_pp(ref_state, ref_ind)\n else:\n steer = int(self.data[-6:-3])\n throttle = int(self.data[:-6]) + 5\n hw_port.set_command(throttle,steer,2)\n self.update_truck_hardware()\n else:\n self.set_control_commands(ref_state)\n self.update_vehicle_state()'''\n\n self.set_control_commands(ref_state, ref_ind)\n self.update_vehicle_state()\n\n # publish vehicle state.\n vehicle_state = msgs.VehicleState(self.vehicle_id, self.class_name,\n self.x, self.y, self.yaw, self.v)\n self.pub_state.publish(vehicle_state)\n self.update_current_node()\n\n #The way that the 
stop light waiting works, this is necessary\n if not self.waiting_at_stop:\n self.check_for_traffic_light()\n self.get_traffic()", "def move(self, direction):\n\n self.direction = direction\n self.logger.debug('current direction: ' + direction)\n\n #remember axis name that instrument thinks in\n if 'Z' in self.current_axis:\n axis_string = 'ZPiezoStepper'\n else:\n if self.direction == 'left' or self.direction == 'right':\n axis_string = 'XPiezoStepper'\n else:\n axis_string = 'YPiezoStepper'\n\n if self.current_move == 'move absolute':\n #combine the spinbox and unit combobox user input to a pint quantity\n self.logger.info('moving to an absolute position')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n\n self.logger.debug('axis: ' + axis_string)\n local_distance = ur(str(distance) + unit)\n self.logger.debug('to position: ' + str(local_distance))\n\n self.moving_thread = WorkThread(self.anc350_instrument.move_to,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'move relative':\n # combine the spinbox and unit combobox user input to a pint quantity\n # add minussign to communicate correct direction to instrument\n self.logger.info('moving relative')\n distance = self.gui.doubleSpinBox_distance.value()\n unit = self.gui.comboBox_unit.currentText()\n self.logger.debug('axis:' + axis_string)\n self.logger.debug('direction: '+ direction)\n\n if self.direction == 'right' or self.direction == 'up':\n local_distance = ur(str(distance) + unit)\n self.logger.debug(str(local_distance))\n elif self.direction == 'left' or self.direction == 'down':\n local_distance = ur(str(-1 * distance) + unit)\n self.logger.debug(str(local_distance))\n\n self.moving_thread = WorkThread(self.anc350_instrument.move_relative,axis_string, local_distance)\n self.moving_thread.start()\n\n elif self.current_move == 'continuous' or self.current_move == 'step':\n # convert direction buttons clicked to direction integers that instrument wants\n # than move for 1s continuously, since the stop button doesnt work yet\n if self.direction == 'left':\n if 'Z' in self.current_axis:\n direction_int = 0 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 1\n elif self.direction == 'right':\n if 'Z' in self.current_axis:\n direction_int = 1 # correct direction, corresponds to labels closer and away\n else:\n direction_int = 0\n elif self.direction == 'up':\n direction_int = 0\n elif self.direction == 'down':\n direction_int = 1\n\n if self.current_move == 'continuous':\n self.logger.info('moving continuously')\n self.moving_thread = WorkThread(self.anc350_instrument.move_continuous, axis_string, direction_int)\n self.moving_thread.start()\n\n elif self.current_move == 'step':\n self.logger.info('making a step')\n self.anc350_instrument.given_step(axis_string, direction_int, 1)", "def step(self, a):\n if self.mirror and self.phase >= self.max_phase / 2:\n a = self.reflect_action(a)\n self.time += 1\n\n self.posbefore = self.robot_skeleton.q[0]\n\n self.do_dart_clocks(a)\n self.set_phase(self.phase + 1)\n\n self.posafter = self.robot_skeleton.q[0]\n\n # ref_pos, ref_vel = self.get_kin_state()\n # self.set_state(ref_pos, ref_vel)\n\n # common behavior for returning step() results\n done = self.is_done()\n ob = self._get_obs()\n reward = self.compute_reward()\n self.reward_buffer.append(reward)\n self.total_reward += reward\n\n self.energy += np.square(a).sum()\n return ob, reward, done, {}", "def 
test_move_straight(controller):\n pos, angle = controller.odometry(20, 20, Vector2(0, 0), 0)\n assert pos == Vector2(\n 2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION,\n 0,\n )\n assert angle == 0\n\n # Move backward in a straight line.\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), math.pi / 2)\n assert pos.x < 1e-10\n assert pos.y == -2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION\n assert angle == math.pi / 2", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def move(self, a1dot, a2dot, timestep):\n\n # body_v, inertial_v = self.get_v(a1dot, a2dot)\n #\n # # lambdafication\n # x_dot = lambda t: inertial_v[0][0]\n # y_dot = lambda t: inertial_v[1][0]\n # theta_dot = lambda t: inertial_v[2][0]\n # _a1dot = lambda t: a1dot\n # _a2dot = lambda t: a2dot\n #\n # # find the increments\n # # fix to include time intervals\n # dx, _ = integrate.quad(x_dot, 0, timestep)\n # dy, _ = integrate.quad(y_dot, 0, timestep)\n # dtheta, _ = integrate.quad(theta_dot, 0, timestep)\n # da1, _ = integrate.quad(_a1dot, 0, timestep)\n # da2, _ = integrate.quad(_a2dot, 0, timestep)\n\n action = (a1dot, a2dot)\n t = timestep * self.t_interval\n x, y, theta, a1, a2 = self.perform_integration(action, t)\n\n # testing\n # print('the increments: ')\n # print(dx, dy, dtheta, da1, da2)\n\n # update robot variables\n self.x = x\n self.y = y\n # self.theta = theta\n # self.a1 = a1\n # self.a2 = a2\n self.time += t\n self.a1dot = a1dot\n self.a2dot = a2dot\n # self.body_v = (body_v[0][0], body_v[1][0], body_v[2][0])\n # self.inertial_v = (inertial_v[0][0], inertial_v[1][0], inertial_v[2][0])\n # self.state = (self.theta, self.a1, self.a2)\n\n # discretize state variables\n # print('before: ' + str(self.state))\n self.theta = self.rnd(self.discretize(theta, self.a_interval))\n\n # prevent theta from going out of -pi to pi range\n self.enforce_theta_range()\n\n self.a1 = self.rnd(self.discretize(a1, self.a_interval))\n self.a2 = self.rnd(self.discretize(a2, self.a_interval))\n self.state = (self.theta, self.a1, self.a2)\n # print('after: ' + str(self.state))\n\n return self.state", "def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def teleop_step(self):\n # get current 
state\n state = self.panda.state\n self.step_number += 1\n\n return_state = self.return_state()\n\n # read in from keyboard\n key_input = self.key.get_controller_state()\n dpos, dquat, grasp, reset = (\n key_input[\"dpos\"],\n key_input[\"dquat\"],\n key_input[\"grasp\"],\n key_input[\"reset\"],\n )\n action = dpos\n\n # action in this example is the end-effector velocity\n self.panda.step(dposition=dpos, dquaternion=dquat, grasp_open=not self.grasp)\n\n # take simulation step\n p.stepSimulation()\n\n # return next_state, reward, done, info\n next_state = self.panda.state\n return_next_state = self.return_state()\n reward, done = self.calculate_reward(next_state, action)\n print(f'step: {self.step_number}\\treward: {reward}\\tdone: {done}')\n if reset:\n done = True\n info = self.panda.state\n\n return return_state, action, reward, return_next_state, done, info", "def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)", "def test_maze_move_5(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.RIGHT)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def step(self):\n\n pass", "def repositionTurtle(t, x, y):\n t.up()\n t.goto(x, y)\n t.down()", "def step(self, action):", "def test_check_user_location_and_goal_location_match_state_and_next_state():\n for _ in range(50):\n env = Four_Rooms_Environment()\n env.reset()\n for _ in range(50):\n move = randint(0, 3)\n env.step(move)\n assert env.state == [env.location_to_state(env.current_user_location), env.location_to_state(env.current_goal_location)]\n assert env.next_state == [env.location_to_state(env.current_user_location), env.location_to_state(env.current_goal_location)]", "def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # if self.robot.body_xyz[0] > self.threshold:\n # rew = 1.0\n # self.threshold += 1\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info", "def step_forward(self):", "def step(self):\n raise NotImplementedError", "def test_basic_movement(self):\n with PhysicsEngineHarness('tests/only-sun.json') as physics_engine:\n # In this case, the only entity is the Sun. It starts at (0, 0)\n # with a speed of (1, -1). 
It should move.\n initial = physics_engine.get_state(1)\n moved = physics_engine.get_state(100)\n t0 = initial.timestamp\n t1 = moved.timestamp\n self.assertEqual(initial.timestamp, 1)\n self.assertAlmostEqual(initial[0].x, 0)\n self.assertAlmostEqual(initial[0].y, 0)\n self.assertAlmostEqual(initial[0].vx, 1)\n self.assertAlmostEqual(initial[0].vy, -1)\n self.assertEqual(moved.timestamp, t1)\n self.assertAlmostEqual(moved[0].x, t1 - t0)\n self.assertAlmostEqual(moved[0].y, -(t1 - t0))\n self.assertAlmostEqual(moved[0].vx, 1)\n self.assertAlmostEqual(moved[0].vy, -1)", "def move(self, algMove):\n if self.d_engine.is_move_correct(algMove):\n print(\"correct\")", "def test_actions_execute_correctly():\n env = Four_Rooms_Environment(stochastic_actions_probability=0.0)\n env.reset()\n env.move_user(env.current_user_location, (3, 3))\n\n env.step(0)\n assert env.current_user_location == (2, 3)\n\n env.step(1)\n assert env.current_user_location == (2, 4)\n\n env.step(2)\n assert env.current_user_location == (3, 4)\n\n env.step(3)\n assert env.current_user_location == (3, 3)\n\n env.step(0)\n assert env.current_user_location == (2, 3)\n\n env.step(0)\n assert env.current_user_location == (1, 3)\n\n env.step(0)\n assert env.current_user_location == (1, 3)\n\n env.step(1)\n assert env.current_user_location == (1, 4)\n\n env.step(1)\n assert env.current_user_location == (1, 5)\n\n env.step(1)\n assert env.current_user_location == (1, 5)", "def step(self, action):\n\n action[1] = 0 if action[1] < 0 else 1\n\n if not self.moving:\n self.agent_host.sendCommand(\"move 0.5\")\n time.sleep(.2)\n self.moving = True\n\n # Get Action\n command = \"strafe \" + str(action[0])\n if ((action[0] < 0 and self.allow_left) or (action[0] > 0 and self.allow_right)):\n self.agent_host.sendCommand(command)\n time.sleep(.2)\n self.agent_host.sendCommand(\"strafe 0\")\n time.sleep(.1)\n\n if action[1]:\n if self.checkCommand:\n self.jumpsOverDitches += 1\n self.checkCommand = False\n self.agent_host.sendCommand(\"jump 1\")\n time.sleep(.2)\n self.agent_host.sendCommand(\"jump 0\")\n\n # if (command == \"crouch 1\"):\n # self.agent_host.sendCommand(command)\n # time.sleep(.3)\n # self.agent_host.sendCommand(\"crouch 0\")\n # time.sleep(.2)\n\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs, self.allow_left, self.allow_right, curZPos, curXPos = self.get_observation(world_state)\n if curZPos:\n self.curZPos = curZPos\n if curXPos:\n if self.obs[3 + int(curXPos)]:\n self.checkCommand = True\n self.numDitchesEncountered += 1\n\n # Get Done\n done = not world_state.is_mission_running \n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n #if self.step_number > 200:\n #self.reset()\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self.gazebo.unpauseSim()\n self._set_action(action)\n #self._prey_step()\n self.gazebo.pauseSim()\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n \n self.cumulated_episode_reward = self.cumulated_episode_reward+ reward\n self.step_number += 1\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, 
info", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def makeMove(self, move, player):", "def update_position(steps):\n\n global position_x, position_y\n new_x = position_x\n new_y = position_y\n\n if directions[current_direction_index] == 'forward':\n new_y = new_y + steps\n elif directions[current_direction_index] == 'right':\n new_x = new_x + steps\n elif directions[current_direction_index] == 'back':\n new_y = new_y - steps\n elif directions[current_direction_index] == 'left':\n new_x = new_x - steps\n\n if is_position_allowed(new_x, new_y):\n position_x = new_x\n position_y = new_y\n return True\n return False", "def after_step():\n raise NotImplementedError", "def move(self) -> bool:\n pass", "def player_movement(self):", "def moveturtle(x,y,t):\n t.penup()\n t.goto(x,y)\n t.pendown()", "def _step(self, action: np.ndarray):\n self.robot.step({\n 'dkitty': action,\n })", "def step(self, speed):\n\n obstacle_speed_double = ctypes.c_double(speed[0])\n agent_x_speed_double = ctypes.c_double(speed[1])\n agent_y_speed_double = ctypes.c_double(speed[2])\n\n self.wrapper.step(self.instance, obstacle_speed_double, agent_x_speed_double, agent_y_speed_double)", "def step(self, action):\n pass", "def step(self, action):\n pass" ]
[ "0.7820133", "0.7763472", "0.6788755", "0.6762144", "0.6700953", "0.66414285", "0.6528104", "0.651918", "0.6467042", "0.644896", "0.6419937", "0.639203", "0.637763", "0.63649815", "0.6354341", "0.63443154", "0.63338375", "0.63002443", "0.6289629", "0.62895745", "0.62881094", "0.6285331", "0.62846184", "0.62506056", "0.6224792", "0.6191413", "0.6185826", "0.6178296", "0.61711407", "0.61653686", "0.6103989", "0.6103989", "0.60948807", "0.6082668", "0.6081535", "0.60764974", "0.60678655", "0.6059346", "0.60592926", "0.60487896", "0.60412896", "0.603052", "0.60209703", "0.6020674", "0.6011223", "0.6000751", "0.59946465", "0.5976784", "0.5966198", "0.5957743", "0.59506506", "0.59349275", "0.5929615", "0.5925344", "0.59249234", "0.59231675", "0.5922465", "0.5915984", "0.58932215", "0.5892871", "0.5891381", "0.5890028", "0.58852345", "0.5871197", "0.58641624", "0.58523744", "0.58417517", "0.58413815", "0.5835737", "0.5827662", "0.58239025", "0.5822273", "0.5819833", "0.5816142", "0.5815633", "0.5813432", "0.5787206", "0.57731503", "0.5772304", "0.5772078", "0.5770248", "0.5769219", "0.5767003", "0.57666814", "0.5766307", "0.57630324", "0.57621336", "0.57620513", "0.5761407", "0.57608426", "0.57602274", "0.5760035", "0.57551426", "0.5754627", "0.5753547", "0.5750385", "0.57503206", "0.5747921", "0.5744594", "0.5744594" ]
0.8622489
0
run the tessellation on an empty image
def test_on_map_of_0s(synthetic_checkerboard): img = synthetic_checkerboard['img'] di = np.zeros_like(img) cpp_vorimg = tess.tessellate_labimg(img,di) py_vorimg = pytess.tessellate_labimg(img,di) assert np.alltrue(py_vorimg[:4,:4] == 1) printers.store_ndarray("py_voronoi_on_map_of_0s_output.txt",py_vorimg) assert cpp_vorimg.size > 0 assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1) printers.store_ndarray("cpp_voronoi_input.txt",img) printers.store_ndarray("cpp_voronoi_on_map_of_0s_output.txt",cpp_vorimg) assert np.alltrue(cpp_vorimg[:4,:4] == 1) assert np.alltrue(cpp_vorimg == py_vorimg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_stuff(self):\n self.create_tourism_raster()", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def final_plain():\n\n\tconfig = Config()\n\tconfig.layer1_size = 256\n\tconfig.num_channels = 15\n\tconfig.target_channels = 3\n\tconfig.target_loss = 0.01\n\tconfig.lifetime = 32\n\tconfig.size = 32\n\tconfig.initial_state = 'sconf_center_black_dot'\n\tconfig.edge_strategy = 'EdgeStrategy.TF_SAME'\n\tconfig.growing_jump = 0\n\n\tfor path in glob.glob(\"images/final/*.png\"):\n\t\timg_name = os.path.basename(path)\n\t\tconfig.target_state = f'sconf_image(\"final/{img_name}\")'\n\t\tbuild_and_train(\"final_compare_gradual\", config)", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def draw_T(self):\n for i in range(self.n):\n for j in range(self.m):\n t = self.T[i, j]\n if t != 0 and self.V[i, j] == 1:\n if len(self.images) > 0:\n self.draw_img(i, j, t)\n else:\n self.draw_text(i, j, str(t), BLACK)", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n 
cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def unpropagateImage(self, dryrun):\n pass", "def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()", "def process(self, image):", "def setUp(self):\n test_file_1 = path.join(\n path.dirname(datasets.__file__), \"twod_image_1.npy\"\n )\n\n original_image = np.load(test_file_1)\n\n # get a single tile from the image to test\n # note this image is currently unpadded.\n # how many boundary elements are needed to pad?\n extracted_image = original_image[0:32, 0:32]\n\n self.img = np.expand_dims(extracted_image, axis=-1)\n\n # Don't make this too huge for brevity.\n self.J = 3\n # 0 = no overlap etc.\n self.overlap_log_2 = 0\n # apply to all available orders\n self.order = 3\n # Should be one or more to avoid aliasing, if you want overlapping\n # tiles this can increase too.\n self.oversampling = 1\n\n self.num_angles = 3\n self.angles = tuple(\n [\n 90.0\n - np.rad2deg(\n (int(self.num_angles - self.num_angles / 2 - 1) - theta)\n * np.pi\n / self.num_angles\n )\n for theta in range(self.num_angles)\n ]\n )\n\n # details of the input data\n self.sample_rate = 0.004 * 3\n\n # vanilla filter bank\n wavelets = [\n vanilla_morlet_2d(self.sample_rate, j=i) for i in range(0, self.J)\n ]\n father_wavelet = vanilla_gabor_2d(self.sample_rate, j=self.J)\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1 = wavelets[0]\n wav2 = wavelets[1]\n wav3 = wavelets[2]\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1_k = wav1.kernel(self.angles[0])\n wav2_k = wav2.kernel(self.angles[1])\n wav3_k = wav3.kernel(self.angles[2])\n\n phi = father_wavelet.kernel(0.0)\n\n npad = 31\n img_pad = np.pad(\n self.img, ((npad, npad), (npad, npad), (0, 0)), mode=\"reflect\"\n )\n # get numpy array of the test input image\n x = img_pad[:, :, 0]\n\n # manual convolution, |x * psi_1|\n conv = np.abs(convolve2d(x, wav1_k, mode=\"same\"))\n conv2 = np.abs(convolve2d(conv, wav2_k, mode=\"same\"))\n conv3 = np.abs(convolve2d(conv2, wav3_k, mode=\"same\"))\n\n # unpad the original image, and convolve with the phi\n # note that the dimensions for phi are one less than the\n # conv 
result, so we get a 4x4 result. Take the first one\n self.manual_result1 = convolve2d(\n conv[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result2 = convolve2d(\n conv2[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result3 = convolve2d(\n conv3[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]", "def execute(self, image):\n undist = self.undistort(image)\n result = self.threshold_image(undist, self.thresholds['ksize'],\n self.thresholds['sobel'],\n self.thresholds['magnitude'],\n self.thresholds['direction'],\n self.thresholds['saturation'],\n self.thresholds['lightness'],\n self.thresholds['blue-yellow'])\n warped = self.warp(result)\n if self.args.is_test:\n self.image_logger.save_image(warped, 'warped_image.png')\n ploty, left_fit, right_fit, left_fitx, right_fitx = self.get_line_fit(warped)\n left_rad, right_rad = measure_curvature(warped, left_fitx, right_fitx, self.args.is_test)\n self.left_line.update(left_fit, left_rad)\n self.right_line.update(right_fit, right_rad)\n result = self.draw_final_image(image, warped, undist, ploty, left_fitx, right_fitx, self.Minv,\n self.left_line.best_curvature,\n self.right_line.best_curvature)\n return result", "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n img_path = osp.join(self.img_prefix, img_info['filename'])\n\n if self.proposals is not None:\n proposal = self.proposals[idx][:self.num_max_proposals]\n if not proposal.shape[1] == 4 or proposal.shape[1] == 5:\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposal.shape))\n else:\n proposal = None\n\n if self.with_background_erasing:\n ann = self.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n else:\n gt_bboxes = None\n\n def prepare_single_scale(img_path, expected_size, flip_ratio=0,\n proposal=None, bbox=None):\n _img, img_shape, pad_shape, scale_factor, \\\n flipped_flag, flipped_direction = self.img_transforms(\n img_path, expected_size, flip_ratio=flip_ratio)\n if bbox is not None:\n if not len(bbox) == 0:\n _gt_bboxes = self.bbox_transforms(bbox,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n else:\n _gt_bboxes = bbox\n _img = self.background_erasing(\n _img, img_shape, _gt_bboxes,\n cell_size=self.be_cell_size,\n random_ratio=self.be_random_ratio)\n _img = to_tensor(_img)\n _img_meta = dict(\n filename=img_info['filename'],\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flipped_flag=flipped_flag,\n flipped_direction=flipped_direction\n )\n if proposal is not None:\n if proposal.shape[1] == 5:\n score = proposal[:, 4, None]\n proposal = proposal[:, :4]\n else:\n score = None\n _proposal = self.bbox_transforms(proposal,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n _proposal = np.hstack([_proposal, score]) \\\n if score is not None else _proposal\n _proposal = to_tensor(_proposal)\n else:\n _proposal = None\n return _img, _img_meta, _proposal\n\n imgs = []\n img_metas = []\n proposals = []\n for expected_size in self.img_expected_sizes:\n # at first, we do not flip the image\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=0,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n if self.flip_ratio > 0:\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, 
flip_ratio=1,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n data = dict(img=imgs, img_meta=img_metas)\n if self.proposals is not None:\n data['proposals'] = proposals\n return data", "def retarget_image(img, T, C, r, c):\n row, col = img.shape[:2]\n seam_path = optimal_path(T, C, r, c)\n img_final = img\n for i in seam_path:\n if i == 0:\n img_final, _ = seam_removal_horizontal(img_final)\n else:\n img_final, _ = seam_removal_vertical(img_final, [])\n return img_final", "def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def run_frame(self, ti, img):\n pass", "def reset_image_estimate(self):\n # reset_shared_var(self.t_A)\n self.t_A.set_value(self.t_QUAD_REG_MEAN.get_value())\n reset_shared_var(self.t_Ap)", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + 
image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = 
ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def test_on_tiff(self):\n im = np.random.randint(0, 127, size=(512, 512))\n path = Path(\".\\\\test_tif.tif\")\n\n # Annoying low contrast warning\n with suppress_warnings():\n imsave(str(path), im)\n\n from_skued = diffread(path)\n self.assertTrue(np.allclose(im, from_skued))\n os.remove(path)", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def run_algo(self, th):\n p = self.run_proc(['threshold', str(th), 'input_0.png',\n 'output.png'])\n self.wait_proc(p, timeout=self.timeout)\n return", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. 
\n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in 
range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, 
vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for 
elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = 
\\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def test_synthetic():\n background = Image.new('RGB', (100, 50), (125, 125, 125))\n red = Image.new('RGB', (10, 5), (255, 0, 0))\n green = Image.new('RGB', (5, 5), (0, 255, 0))\n blue = Image.new('RGB', (20, 5), (0, 0, 255))\n positions = [\n [0, 0],\n [9, 5],\n [99, 20]\n ]\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': positions\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (100, 50))\n assert_equal(synth.getpixel((0, 0)), (255, 0, 0, 255))\n # if there was no overwrite of overlapping patches, this should be:\n # assert_equal(synth.getpixel((9, 5)), (255, 255, 0, 255))\n # but since green is pasted last it is:\n assert_equal(synth.getpixel((9, 5)), (0, 255, 0, 255))", "def test_no_shared_transformations():\n sdata = blobs()\n element_name = \"blobs_image\"\n test_space = \"test\"\n set_transformation(sdata.images[element_name], Identity(), to_coordinate_system=test_space)\n\n gen = sdata._gen_elements()\n for _, name, obj in gen:\n if name != element_name:\n assert test_space not in get_transformation(obj, get_all=True)\n else:\n assert test_space in get_transformation(obj, get_all=True)", "def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def __init__(self, data_dir, mode='train'):\n self.mode = mode\n self.data_dir = data_dir\n if self.mode == 'train':\n self.img_dir = os.path.join(self.data_dir, 'train')\n self.gt_dir = os.path.join(self.data_dir, 'train_gt')\n elif self.mode == 'test':\n self.img_dir = os.path.join(self.data_dir, 'test')\n self.gt_dir = os.path.join(self.data_dir, 'test_gt')\n\n ''' set up list of filenames for retrieval purposes'''\n self.filenames = [image_basename(f) for f in os.listdir(self.img_dir)]\n self.filenames.sort()\n self.gt_names = [image_basename(f) for f in os.listdir(self.gt_dir)]\n self.gt_names.sort()\n\n ''' set up image transform '''\n if self.mode == 'train':\n self.transform = transforms.Compose([\n transforms.Resize((1024, 1024)),\n # transforms.RandomHorizontalFlip(),\n # transforms.CenterCrop((512, 512)),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =0),\n transforms.Resize((1024, 1024)),\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor(),\n ])\n\n elif self.mode == 'test':\n self.transform = transforms.Compose([\n# transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)),\n # /!\\ to remove later\n #transforms.RandomHorizontalFlip(),\n 
transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)), # /!\\ to remove later\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor()\n ])", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def main():\n # ------------------------\n # 0 SETUP\n # ------------------------\n log.getLogger().setLevel(log.INFO)\n torch.autograd.set_detect_anomaly(True)\n parser = argparse.ArgumentParser(\n description='Image Style Transfer Training')\n parser = pl.Trainer.add_argparse_args(parser)\n # data\n parser.add_argument('-s', '--style-image', default='cropamara', type=str)\n parser.add_argument('-d', '--dataset',\n default=os.path.join('/', 'fridge', 'coco'), type=str)\n parser.add_argument('-t', '--to-style',\n default=os.path.join('images', 'test'), type=str)\n parser.add_argument('-m', '--model', default='transformer', type=str)\n parser.add_argument('-b', '--batch-size', default=1, type=int)\n parser.add_argument('-lr', '--learning-rate', default=0.001, type=float,\n help='initial learning rate')\n parser.add_argument(\"--image-size\", type=int, default=256,\n help=\"size of training images, default is 256\")\n\n parser.add_argument(\"--seed\", type=int, default=4747,\n help=\"random seed for training\")\n parser.add_argument(\"--content-weight\", type=float, default=1e5,\n help=\"weight for content-loss, default is 1e5\")\n parser.add_argument(\"--style-weight\", type=float, default=1e10,\n help=\"weight for style-loss, default is 1e10\")\n parser.add_argument(\"--weights\", type=str, default='flat',\n help=\"weight for layer losses, default is 1 each\")\n\n parser.add_argument(\"--content-image\", type=str, default='./images/content-images/gbimage2.jpeg',\n help=\"path to content image you want to stylize\")\n parser.add_argument(\"--content-scale\", type=float, default=None,\n help=\"factor for scaling down the content image\")\n parser.add_argument(\"--output-dir\", type=str, default='./images/output-images/',\n help=\"path for saving the output images\")\n\n parser.add_argument(\"-cp\", \"--checkpoint\", type=str, default='',\n help=\"path for starting weights\")\n parser.add_argument(\"--single\", action='store_true')\n\n parser.set_defaults(progress_bar_refresh_rate=5,\n gpus='0,1,2',\n max_epochs=50,\n overfit_pct=0.01,\n profiler=True,\n weights_summary='full',\n logger=False,\n distributed_backend=\"dp\")\n args = parser.parse_args()\n\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = FastNeuralStyleSystem(args)\n if args.checkpoint is not '':\n print(f'loading checkpoint: {args.checkpoint}')\n FastNeuralStyleSystem.load_from_checkpoint(args.checkpoint)\n print(model.hparams)\n if args.single:\n print('single image optimize')\n model.to('cuda')\n model.prepare_data()\n model.optimize()\n print('Done single image')\n return\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = pl.Trainer.from_argparse_args(args)\n trainer.checkpoint_callback = pl.callbacks.ModelCheckpoint(\n 
filepath='./trained_models',\n save_top_k=2,\n verbose=True,\n monitor='train_loss',\n mode='min',\n prefix=args.style_image\n )\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)\n\n import glob\n saved_images = glob.glob(\n f\"{args.output_dir}/{args.style_image}_steps_c_{args.content_weight}_s_{args.style_weight}/*png\")\n gif_images = []\n for step_img in saved_images:\n gif_images.append(imageio.imread(step_img))\n imageio.mimsave(os.path.join(temp_dir, '0_optimization.gif'), gif_images)", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n 
first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def test_without_target(self, X, y):\n try:\n resize_batch(X, y, 1.0, 'crop', resize_targets=False)\n except:\n pytest.fail('apply_progressive_resizing failed with y == None')", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def propagateImage(self, dryrun):\n pass", "def process(image):\n pass", "def evaluate(t, x, y):\n from PIL import Image\n im = Image.open(filename)\n duration = im.info[\"duration\"]*pq.ms if im.info[\"duration\"] is not 0 else 30*pq.ms\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n\n stim = np.zeros([Nt, Ny, Nx])\n t_map = (t.flatten().rescale(\"ms\") / duration).astype(int)\n t_map = t_map[1:] - t_map[:-1]\n for i, ti in enumerate(t_map):\n try:\n im.seek(im.tell()+ti)\n except EOFError:\n break\n frame = im.convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM).resize((Ny, Nx))\n stim[i, :, :] = np.array(frame)\n stim[i, :, :] = 2 * ((stim[i, :, :] - stim[i, :, :].min()) / (stim[i, :, :].max() - stim[i, :, :].min())) - 1\n\n return stim", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = 
np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def resetTransformations():\n dislin.trfres()", "def process_image(self):\n pass", "def imageprepare(argv):\r\n im = Image.open(argv).convert('L')\r\n width = float(im.size[0])\r\n height = float(im.size[1])\r\n newImage = Image.new('L', (28, 28), (255)) #creates white canvas of 28x28 pixels\r\n \r\n if width > height: #check which dimension is bigger\r\n #Width is bigger. Width becomes 20 pixels.\r\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\r\n if (nheight == 0): #rare case but minimum is 1 pixel\r\n nheight = 1 \r\n # resize and sharpen\r\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\r\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\r\n else:\r\n #Height is bigger. Heigth becomes 20 pixels. \r\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\r\n if (nwidth == 0): #rare case but minimum is 1 pixel\r\n nwidth = 1\r\n # resize and sharpen\r\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\r\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\r\n \r\n #newImage.save(\"sample.png\")\r\n\r\n tv = list(newImage.getdata()) #get pixel values\r\n \r\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [ (255-x)*1.0/255.0 for x in tv] \r\n #print(tva)\r\n return tva", "def main() -> None:\n\n # Define file name\n file_name = define_file()\n\n # Open chosen image\n img = image.load_img(IMAGES + file_name, color_mode='grayscale')\n\n # Show user image\n plt.imshow(img)\n plt.show()\n\n # Convert image to array\n img_arr = image.img_to_array(img)\n img_arr = np.array([img_arr])\n img_arr = img_arr.astype(\"float32\") / 255.0\n\n # Classify image\n img_class = classification(img_arr)\n\n # Suggest user add noise to original image\n if img_class == ORIGINAL:\n while True:\n command = input('Seems like your image is original. Do you want to add noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n noisy_array = noise(img_arr)\n display(img_arr, noisy_array)\n img = image.array_to_img(noisy_array[0])\n img.save(IMAGES + file_name[:-4] + '_noise' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Suggest user remove noise from noised image\n elif img_class == NOISED:\n while True:\n command = input('Seems like your image has noise. Do you want to remove noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n denoise_array = denoise_image(img_arr)\n display(img_arr, denoise_array)\n img = image.array_to_img(denoise_array[0])\n img.save(IMAGES + file_name[:-4] + '_denoised' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Image denoised. Nothing to do\n else:\n print('Seems like your image denoised.')\n main()", "def demo(image, model_class, do_add_noise=True):\n Log.enable_output = True\n Log.set_log_max_depth(8)\n\n image = normalise(image)\n image = numpy.expand_dims(image, axis=0)\n image = numpy.expand_dims(image, axis=0)\n noisy = add_noise(image) if do_add_noise else image\n print(noisy.shape)\n\n # noisy = models.tensor(noisy)\n image = torch.tensor(image)\n\n model = model_class(\n nb_unet_levels=2,\n spacetime_ndim=2,\n )\n\n print(\"training starts\")\n\n start = time.time()\n n2t_train(noisy, model, nb_epochs=128)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n noisy = torch.tensor(noisy)\n model.eval()\n model = model.cpu()\n print(f\"noisy tensor shape: {noisy.shape}\")\n # in case of batching we have to do this:\n start = time.time()\n denoised = model(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n noisy = noisy.detach().numpy()[0, 0, :, :]\n image = image.detach().numpy()[0, 0, :, :]\n denoised = denoised.detach().numpy()[0, 0, :, :]\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n\n return calculate_print_psnr_ssim(image, noisy, denoised)\n\n # import napari\n #\n # viewer = napari.Viewer() # no prior setup needed\n # viewer.add_image(image, name='image')\n # viewer.add_image(noisy, name='noisy')\n # viewer.add_image(denoised, name='denoised')\n # napari.run()", "def applyARUNet(self):\n assert(self.files and len(self.files) > 0)\n\n # TODO: move this to the init method\n session_conf = tf.ConfigProto()\n session_conf.gpu_options.visible_device_list = self.gpu_device\n pred = None\n with tf.Session(graph=self.graph, config=session_conf) as sess:\n x = self.graph.get_tensor_by_name('inImg:0')\n predictor = self.graph.get_tensor_by_name('output:0')\n \n progress = getProgressBar()\n for i in progress(range(len(self.files))):\n# print(self.files[i])\n # img: numpy array (height x 
width x channels)\n # scipy's misc.imread is deprecated\n # TODO: switch maybe to opencv instead of pillow with its image\n # class overhead\n pil_img = Image.open(self.files[i]).convert('L') # grayscale\n img = np.array(pil_img)\n size = (int(img.shape[1]*self.scale),int(img.shape[0]*self.scale))\n small = np.array(pil_img.resize(size, resample=Image.BICUBIC))\n origsize = (img.shape[1],img.shape[0]) \n\n # TODO: can we actually put them in 1 or 2 batches?\n pred1 = self.runSession(sess, x, predictor, small)\n out = self.pruneBaselines(pred1, size=origsize)\n\n # try other orientation\n # TODO: think of a better check! \n # this one depends on the resolution and due to noise might\n # still pass...\n if self.test_orientation and \\\n np.count_nonzero(out) < 100: \n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n print('rotate it now and try again...')\n # rotate 90 degree counter clock-wise\n small2 = rotate(small, 90, True)\n pred2 = self.runSession(sess, x, predictor, small2) \n origsize = (origsize[1],origsize[0])\n out2 = self.pruneBaselines(pred2, size=origsize)\n # check which direction has higher probability\n # Note: unfortunately the probas are similar high for 0 + 180,\n # as well as 90 and 270 degree, so we cannot test for these\n # orientations!\n # Note 2: raw probability map didnt work out for me, so lets do\n # it that way\n n_comp, _, stats1, _ =\\\n cv2.connectedComponentsWithStats(out.astype(np.uint8))\n n_comp2, _, stats2, _ =\\\n cv2.connectedComponentsWithStats(out2.astype(np.uint8))\n # test for area, assumption is that we get larger\n # mean/median/sum area if it's correctly rotated\n # TODO: might still be a bad test due to noise...\n stat1 = np.sum(stats1[1:,cv2.CC_STAT_AREA])\n stat2 = np.sum(stats2[1:,cv2.CC_STAT_AREA])\n if stat2 > stat1: \n print('rotation by 90 degree counter clockwise gives higher'\n ' probability (orig {} vs rot: {}) for file {}\\n'\n ' -> rotate this file (90 degree'\n ' counter clock-wise), too!'.format(stat1, stat2, self.files[i]))\n out = out2\n \n # small check if we have found a line at all\n if np.count_nonzero(out) < 50: # TODO: think of a better check\n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n\n if self.to_line:\n out = baseToLine(out)\n\n # save it\n name = os.path.splitext(os.path.basename(self.files[i]))[0]\n suffix = self.out_suffix if self.out_suffix else ''\n path = os.path.join(self.outdir, '{}{}.png'.format(name,suffix))\n# print('save to: {}'.format(path))\n out = out * 255\n misc.imsave(path, out)\n \n return pred", "def run_image(image_path, lattice_size=35):\n im = plt.imread(image_path)[:, :, 2]\n im_pixels = _pixels(im)\n\n print('compression ratio is ', lattice_size**2 / float(im.size))\n\n # Hyperparameters.\n num_keypoints = 2\n hparams = tfl.CalibratedRtlHParams(\n num_keypoints=num_keypoints,\n num_lattices=1,\n lattice_rank=2,\n learning_rate=0.003,\n lattice_size=lattice_size)\n\n # Estimator.\n # input: coordinate of the pixel\n # output: value of the pixel\n feature_columns = [\n tf.feature_column.numeric_column('pixel_x'),\n tf.feature_column.numeric_column('pixel_y'),\n ]\n\n def keypoints_initializers():\n return tfl.uniform_keypoints_for_signal(\n num_keypoints,\n input_min=0.0,\n input_max=im_pixels.max(),\n output_min=0.0,\n output_max=lattice_size - 1\n )\n rtl_estimator = tfl.calibrated_rtl_regressor(\n feature_columns=feature_columns,\n hparams=hparams,\n keypoints_initializers_fn=keypoints_initializers\n )\n\n # Example input 
function.\n input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=15,\n shuffle=True)\n\n # Train!\n rtl_estimator.train(input_fn=input_fn)\n\n # Evaluate!\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=1,\n shuffle=True)\n print(rtl_estimator.evaluate(input_fn=eval_input_fn))\n\n return rtl_estimator", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised 
(noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def applyMorphologicalCleaning(self, image):", "def __call__(self, images, targets):\n pass", "def evaluate(t, x, y):\n # TODO: fix normalization\n from PIL import Image\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n stim = np.zeros([Nt, Nx, Ny])\n\n for i, filename in enumerate(filenames):\n im = Image.open(filename).convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM)\n t_start = delay + i * (delay + duration)\n t_stop = (i+1) * (duration + delay)\n stim += np.array(im.resize((Ny, Nx))) * (heaviside(t - t_start) - heaviside(t - t_stop))\n\n if stim.max() - stim.min() != 0:\n stim = 2 * ((stim - stim.min()) / (stim.max() - stim.min())) - 1\n return stim", "def main():\n\n # first figure: betas for each predictor\n fig, axes = plt.subplots(figsize=(8, 18), nrows=3)\n\n image_paths = {\n \"3T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20161006_childVSall_depth_1.png\"\n ),\n \"7T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4.png\"\n ),\n \"7T_noise\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4_noise.png\"\n ),\n }\n\n for ax, (_, image_path) in zip(axes, image_paths.items()):\n assert os.path.isfile(image_path)\n img = imread(image_path)\n img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0\n\n ax.imshow(img[200:-100, 50:-50, :])\n ax.axis(\"off\")\n\n savefig(f\"{PATHS['figures']}/figure_3ac.png\")\n plt.close(fig)", "def test_scrubbing_wf_no_insert_na(\n artifact_dir, sample_raw_image, plot_img, request, helpers\n):\n\n test_path = helpers.create_test_dir(artifact_dir, request.node.name)\n scrubbed_path = test_path / \"scrubbed.nii.gz\"\n\n scrub_vector = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0]\n\n wf = build_scrubbing_workflow(\n scrub_vector,\n import_path=sample_raw_image,\n insert_na=False,\n export_path=scrubbed_path,\n base_dir=test_path,\n crashdump_dir=test_path,\n )\n\n wf.write_graph(dotfilename=test_path / \"scrubbed_flow\", graph2use=\"colored\")\n\n wf.run()\n\n helpers.plot_timeseries(scrubbed_path, sample_raw_image)\n\n if plot_img:\n helpers.plot_4D_img_slice(scrubbed_path, \"scrubbed.png\")", "def testDetect(name = \"smokey.gif\", amount = 20):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n image2 = detectEdges(image, amount)\n image2.draw()", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n 
\"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def skeletonize(data,subscriber = 0):\n nx,ny=data.shape\n #zero padding\n image = zeros((nx+2,ny+2),'int16')\n image[:,:] = IP.BACKGROUND_COLOR\n image[1:-1,1:-1]=data\n\n erosionComplete = False\n runs = 0\n erosionComplete = False\n runs = 0\n isCorner = zeros((nx+2,ny+2),'bool')\n while not erosionComplete:\n ruleI = (image == IP.FEATURE_COLOR)\n XFeat, YFeat = ruleI.nonzero()\n numberFeatures = len(XFeat)\n erosedPixels = 0\n if runs == 0:\n progressbar = progress(numberFeatures)\n neighbourhood = zeros((nx+2,ny+2,3),'int16')\n for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n ruleII = neighbourhood[:,:,1]>=1\n ruleIII = neighbourhood[:,:,0]> 1\n border = (ruleI & ruleII & ruleIII)\n #ruleIV and ruleV\n XBord, YBord = border.nonzero()\n XBord2 = []\n 
YBord2 = []\n for x,y in zip(XBord.tolist(),YBord.tolist()):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n else:\n XBord2.append(x)\n YBord2.append(y)\n for x,y in zip(XBord2,YBord2):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n if erosedPixels == 0:\n erosionComplete = True\n subscriber %= 100.\n else:\n xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n if neighbourhood[x,y,2] == 1:\n isCorner[x+1,y-1] = True\n elif neighbourhood[x,y,2] == 2:\n isCorner[x+1,y+1] = True\n elif neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def testPosterize(name = \"smokey.gif\", triple = (0,0,0)):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n posterize(image, triple)\n image.draw()", "def transform(self, previousimage):", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def train(args):\n # Create the data loader\n loader = sunnerData.DataLoader(\n dataset = sunnerData.ImageDataset(\n root = [[args.train]],\n transforms = transforms.Compose([\n \n# transforms.RandomCrop(720,720)\n# transforms.RandomRotation(45)\n# transforms.RandomHorizontalFlip(), \n# transforms.ColorJitter(brightness=0.5, contrast=0.5),\n \n\n 
sunnerTransforms.Resize(output_size = (args.H, args.W)),\n #transforms.RandomCrop(512,512)\n sunnerTransforms.ToTensor(),\n sunnerTransforms.ToFloat(),\n # sunnerTransforms.Transpose(),\n sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n ), batch_size = args.batch_size, shuffle = True, num_workers = 2\n )\n loader = sunnerData.IterationLoader(loader, max_iter = args.n_iter)\n\n # Create the model\n model = GANomaly2D(r = args.r, device = args.device)\n model.IO(args.resume, direction = 'load')\n model.train()\n \n # Train!\n bar = tqdm(loader)\n for i, (normal_img,) in enumerate(bar):\n model.forward(normal_img)\n model.backward()\n loss_G, loss_D = model.getLoss()\n bar.set_description(\"Loss_G: \" + str(loss_G) + \" loss_D: \" + str(loss_D))\n bar.refresh()\n if i % args.record_iter == 0:\n model.eval()\n with torch.no_grad():\n z, z_ = model.forward(normal_img)\n img, img_ = model.getImg()\n visualizeEncoderDecoder(img, img_, z, z_,i)\n model.train()\n model.IO(args.det, direction = 'save')\n model.IO(args.det, direction = 'save')", "def tessellate(self):\n\n self.tessellation = Delaunay(self.grid)", "def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize 
data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of 
cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def test_time_optimize(args, model, optim, imgs, poses, hwf, bound):\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(args.tto_steps):\n indices = torch.randint(num_rays, size=[args.tto_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)", "def run_predictive(op) -> None:\n\n try:\n img = Image.open(op['input'])\n except Exception as e:\n print(e)\n sys.exit(1)\n\n algo.predictive.run(op)", "def test_replace_image(self):\n pass", "def test_resize_noop(self, X, y, mode):\n Xc, _ = resize_batch(X, y, 1.0, mode, resize_targets=False)\n assert X is Xc", "def run(self, image):\n # width, height = image.size\n # resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n # target_size = (int(resize_ratio * width), int(resize_ratio * height))\n target_size = (self.INPUT_SIZE, self.INPUT_SIZE)\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n net_image = resized_image\n if params.HZ_preprocess_activate:\n net_image = params.image_preprocess_func(resized_image)\n net_image = np.expand_dims(net_image, axis=-1)\n batch_seg_map = self.sess.run(\n 
self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(net_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map", "def test_transformer2d_single_step_e2e(self):\n\n problem_object = allen_brain.Img2imgAllenBrainDim8to32()\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n\n with TemporaryDirectory() as data_dir:\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n input_xy_dim = problem_object.input_dim\n target_xy_dim = problem_object.output_dim\n num_channels = problem_object.num_channels\n\n hparams = image_transformer_2d.img2img_transformer2d_tiny()\n hparams.data_dir = data_dir\n\n p_hparams = problem_object.get_hparams(hparams)\n\n model = image_transformer_2d.Img2imgTransformer(\n hparams, tf.estimator.ModeKeys.TRAIN, p_hparams\n )\n\n @tfe.implicit_value_and_gradients\n def loss_fn(features):\n _, losses = model(features)\n return losses[\"training\"]\n\n batch_size = 1\n train_dataset = problem_object.dataset(Modes.TRAIN, data_dir)\n train_dataset = train_dataset.repeat(None).batch(batch_size)\n\n optimizer = tf.train.AdamOptimizer()\n\n example = tfe.Iterator(train_dataset).next()\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [batch_size,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n _, gv = loss_fn(example)\n optimizer.apply_gradients(gv)\n\n model.set_mode(Modes.EVAL)\n dataset = problem_object.dataset(Modes.EVAL, data_dir)\n\n example = tfe.Iterator(dataset).next()\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [1,\n input_xy_dim,\n input_xy_dim,\n num_channels])\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [1,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n\n predictions, _ = model(example)\n\n self.assertEqual(predictions.numpy().shape,\n (1,\n target_xy_dim,\n target_xy_dim,\n num_channels,\n 256))", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, 
mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def imageprepare(argv):\n im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def test_synthetic_auto():\n background = Image.new('RGB', (7, 3), (125, 125, 125))\n red = Image.new('RGB', (1, 1), (255, 0, 0))\n green = Image.new('RGB', (1, 1), (0, 255, 0))\n blue = Image.new('RGB', (1, 1), (0, 0, 255))\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': 'auto'\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (7, 3))\n assert_equal(synth.getpixel((1, 1)), (255, 0, 0, 255))\n assert_equal(synth.getpixel((3, 1)), (0, 255, 0, 255))\n assert_equal(synth.getpixel((5, 1)), (0, 0, 255, 255))", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def getLoader(s_image_dir,c_image_dir, \n style_selected_dir, content_selected_dir,\n crop_size=178, batch_size=16, num_workers=8, \n colorJitterEnable=True, colorConfig={\"brightness\":0.05,\"contrast\":0.05,\"saturation\":0.05,\"hue\":0.05}):\n s_transforms = []\n c_transforms = []\n \n s_transforms.append(StyleResize())\n # s_transforms.append(T.Resize(900))\n c_transforms.append(T.Resize(900))\n\n s_transforms.append(T.RandomCrop(crop_size,pad_if_needed=True,padding_mode='reflect'))\n 
c_transforms.append(T.RandomCrop(crop_size))\n\n s_transforms.append(T.RandomHorizontalFlip())\n c_transforms.append(T.RandomHorizontalFlip())\n \n s_transforms.append(T.RandomVerticalFlip())\n c_transforms.append(T.RandomVerticalFlip())\n\n if colorJitterEnable:\n if colorConfig is not None:\n print(\"Enable color jitter!\")\n colorBrightness = colorConfig[\"brightness\"]\n colorContrast = colorConfig[\"contrast\"]\n colorSaturation = colorConfig[\"saturation\"]\n colorHue = (-colorConfig[\"hue\"],colorConfig[\"hue\"])\n s_transforms.append(T.ColorJitter(brightness=colorBrightness,\\\n contrast=colorContrast,saturation=colorSaturation, hue=colorHue))\n c_transforms.append(T.ColorJitter(brightness=colorBrightness,\\\n contrast=colorContrast,saturation=colorSaturation, hue=colorHue))\n s_transforms.append(T.ToTensor())\n c_transforms.append(T.ToTensor())\n\n s_transforms.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n c_transforms.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n \n s_transforms = T.Compose(s_transforms)\n c_transforms = T.Compose(c_transforms)\n\n content_dataset = TotalDataset(c_image_dir,s_image_dir, content_selected_dir, style_selected_dir\n , c_transforms,s_transforms)\n content_data_loader = data.DataLoader(dataset=content_dataset,batch_size=batch_size,\n drop_last=True,shuffle=True,num_workers=num_workers,pin_memory=True)\n prefetcher = data_prefetcher(content_data_loader)\n return prefetcher" ]
[ "0.61748016", "0.5833619", "0.57900006", "0.57824117", "0.5640208", "0.5631252", "0.56215304", "0.56194323", "0.5608988", "0.56024307", "0.5585959", "0.55422133", "0.5494821", "0.54943854", "0.54853487", "0.5449842", "0.54339325", "0.54300725", "0.54207504", "0.5395208", "0.538505", "0.53806543", "0.5361486", "0.53534174", "0.5348697", "0.53351843", "0.53295827", "0.5322457", "0.5313348", "0.52960414", "0.5295383", "0.5285343", "0.5274901", "0.5271447", "0.5271447", "0.5269739", "0.5256925", "0.5256266", "0.52546185", "0.5249764", "0.5249196", "0.5240734", "0.5236575", "0.5235166", "0.5230899", "0.5228465", "0.5226331", "0.52251244", "0.52250826", "0.5218824", "0.52164507", "0.5211714", "0.5204908", "0.52027106", "0.51977015", "0.51957864", "0.51860875", "0.5173146", "0.5164221", "0.5163691", "0.5162913", "0.51616305", "0.5161485", "0.5161151", "0.51566887", "0.5154892", "0.5154867", "0.51516825", "0.51494014", "0.5148976", "0.5145885", "0.51458055", "0.514479", "0.5141174", "0.5138843", "0.5130859", "0.512487", "0.51209754", "0.5113267", "0.5104555", "0.51021266", "0.509995", "0.50962406", "0.50937897", "0.5087318", "0.5085369", "0.5083509", "0.5082485", "0.5080704", "0.5080675", "0.50804096", "0.50800014", "0.5078223", "0.5074249", "0.5071642", "0.50707865", "0.50585204", "0.5057812", "0.5054274", "0.50504553", "0.50470585" ]
0.0
-1
run the tessellation on an empty image
def test_on_map_of_constants(synthetic_checkerboard):
    img = synthetic_checkerboard['img']
    di = synthetic_checkerboard['cdi']

    cpp_vorimg = tess.tessellate_labimg(img,di)
    py_vorimg = pytess.tessellate_labimg(img,di)

    assert np.alltrue(py_vorimg[:4,:4] == 1)
    printers.store_ndarray("py_voronoi_on_map_of_constants_output.txt",py_vorimg)

    assert cpp_vorimg.size > 0
    assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape
    assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1)

    printers.store_ndarray("cpp_voronoi_input.txt",img)
    printers.store_ndarray("cpp_voronoi_on_map_of_constants_output.txt",cpp_vorimg)

    assert np.alltrue(cpp_vorimg[:4,:4] == 1)
    assert np.alltrue(cpp_vorimg == py_vorimg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_stuff(self):\n self.create_tourism_raster()", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def final_plain():\n\n\tconfig = Config()\n\tconfig.layer1_size = 256\n\tconfig.num_channels = 15\n\tconfig.target_channels = 3\n\tconfig.target_loss = 0.01\n\tconfig.lifetime = 32\n\tconfig.size = 32\n\tconfig.initial_state = 'sconf_center_black_dot'\n\tconfig.edge_strategy = 'EdgeStrategy.TF_SAME'\n\tconfig.growing_jump = 0\n\n\tfor path in glob.glob(\"images/final/*.png\"):\n\t\timg_name = os.path.basename(path)\n\t\tconfig.target_state = f'sconf_image(\"final/{img_name}\")'\n\t\tbuild_and_train(\"final_compare_gradual\", config)", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def draw_T(self):\n for i in range(self.n):\n for j in range(self.m):\n t = self.T[i, j]\n if t != 0 and self.V[i, j] == 1:\n if len(self.images) > 0:\n self.draw_img(i, j, t)\n else:\n self.draw_text(i, j, str(t), BLACK)", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n 
cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def unpropagateImage(self, dryrun):\n pass", "def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()", "def process(self, image):", "def setUp(self):\n test_file_1 = path.join(\n path.dirname(datasets.__file__), \"twod_image_1.npy\"\n )\n\n original_image = np.load(test_file_1)\n\n # get a single tile from the image to test\n # note this image is currently unpadded.\n # how many boundary elements are needed to pad?\n extracted_image = original_image[0:32, 0:32]\n\n self.img = np.expand_dims(extracted_image, axis=-1)\n\n # Don't make this too huge for brevity.\n self.J = 3\n # 0 = no overlap etc.\n self.overlap_log_2 = 0\n # apply to all available orders\n self.order = 3\n # Should be one or more to avoid aliasing, if you want overlapping\n # tiles this can increase too.\n self.oversampling = 1\n\n self.num_angles = 3\n self.angles = tuple(\n [\n 90.0\n - np.rad2deg(\n (int(self.num_angles - self.num_angles / 2 - 1) - theta)\n * np.pi\n / self.num_angles\n )\n for theta in range(self.num_angles)\n ]\n )\n\n # details of the input data\n self.sample_rate = 0.004 * 3\n\n # vanilla filter bank\n wavelets = [\n vanilla_morlet_2d(self.sample_rate, j=i) for i in range(0, self.J)\n ]\n father_wavelet = vanilla_gabor_2d(self.sample_rate, j=self.J)\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1 = wavelets[0]\n wav2 = wavelets[1]\n wav3 = wavelets[2]\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1_k = wav1.kernel(self.angles[0])\n wav2_k = wav2.kernel(self.angles[1])\n wav3_k = wav3.kernel(self.angles[2])\n\n phi = father_wavelet.kernel(0.0)\n\n npad = 31\n img_pad = np.pad(\n self.img, ((npad, npad), (npad, npad), (0, 0)), mode=\"reflect\"\n )\n # get numpy array of the test input image\n x = img_pad[:, :, 0]\n\n # manual convolution, |x * psi_1|\n conv = np.abs(convolve2d(x, wav1_k, mode=\"same\"))\n conv2 = np.abs(convolve2d(conv, wav2_k, mode=\"same\"))\n conv3 = np.abs(convolve2d(conv2, wav3_k, mode=\"same\"))\n\n # unpad the original image, and convolve with the phi\n # note that the dimensions for phi are one less than the\n # conv 
result, so we get a 4x4 result. Take the first one\n self.manual_result1 = convolve2d(\n conv[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result2 = convolve2d(\n conv2[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result3 = convolve2d(\n conv3[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]", "def execute(self, image):\n undist = self.undistort(image)\n result = self.threshold_image(undist, self.thresholds['ksize'],\n self.thresholds['sobel'],\n self.thresholds['magnitude'],\n self.thresholds['direction'],\n self.thresholds['saturation'],\n self.thresholds['lightness'],\n self.thresholds['blue-yellow'])\n warped = self.warp(result)\n if self.args.is_test:\n self.image_logger.save_image(warped, 'warped_image.png')\n ploty, left_fit, right_fit, left_fitx, right_fitx = self.get_line_fit(warped)\n left_rad, right_rad = measure_curvature(warped, left_fitx, right_fitx, self.args.is_test)\n self.left_line.update(left_fit, left_rad)\n self.right_line.update(right_fit, right_rad)\n result = self.draw_final_image(image, warped, undist, ploty, left_fitx, right_fitx, self.Minv,\n self.left_line.best_curvature,\n self.right_line.best_curvature)\n return result", "def retarget_image(img, T, C, r, c):\n row, col = img.shape[:2]\n seam_path = optimal_path(T, C, r, c)\n img_final = img\n for i in seam_path:\n if i == 0:\n img_final, _ = seam_removal_horizontal(img_final)\n else:\n img_final, _ = seam_removal_vertical(img_final, [])\n return img_final", "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n img_path = osp.join(self.img_prefix, img_info['filename'])\n\n if self.proposals is not None:\n proposal = self.proposals[idx][:self.num_max_proposals]\n if not proposal.shape[1] == 4 or proposal.shape[1] == 5:\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposal.shape))\n else:\n proposal = None\n\n if self.with_background_erasing:\n ann = self.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n else:\n gt_bboxes = None\n\n def prepare_single_scale(img_path, expected_size, flip_ratio=0,\n proposal=None, bbox=None):\n _img, img_shape, pad_shape, scale_factor, \\\n flipped_flag, flipped_direction = self.img_transforms(\n img_path, expected_size, flip_ratio=flip_ratio)\n if bbox is not None:\n if not len(bbox) == 0:\n _gt_bboxes = self.bbox_transforms(bbox,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n else:\n _gt_bboxes = bbox\n _img = self.background_erasing(\n _img, img_shape, _gt_bboxes,\n cell_size=self.be_cell_size,\n random_ratio=self.be_random_ratio)\n _img = to_tensor(_img)\n _img_meta = dict(\n filename=img_info['filename'],\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flipped_flag=flipped_flag,\n flipped_direction=flipped_direction\n )\n if proposal is not None:\n if proposal.shape[1] == 5:\n score = proposal[:, 4, None]\n proposal = proposal[:, :4]\n else:\n score = None\n _proposal = self.bbox_transforms(proposal,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n _proposal = np.hstack([_proposal, score]) \\\n if score is not None else _proposal\n _proposal = to_tensor(_proposal)\n else:\n _proposal = None\n return _img, _img_meta, _proposal\n\n imgs = []\n img_metas = []\n proposals = []\n for expected_size in self.img_expected_sizes:\n # at first, we do not flip the image\n _img, _img_meta, _proposal = prepare_single_scale(\n 
img_path, expected_size, flip_ratio=0,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n if self.flip_ratio > 0:\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=1,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n data = dict(img=imgs, img_meta=img_metas)\n if self.proposals is not None:\n data['proposals'] = proposals\n return data", "def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def run_frame(self, ti, img):\n pass", "def reset_image_estimate(self):\n # reset_shared_var(self.t_A)\n self.t_A.set_value(self.t_QUAD_REG_MEAN.get_value())\n reset_shared_var(self.t_Ap)", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + 
image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = 
ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def test_on_tiff(self):\n im = np.random.randint(0, 127, size=(512, 512))\n path = Path(\".\\\\test_tif.tif\")\n\n # Annoying low contrast warning\n with suppress_warnings():\n imsave(str(path), im)\n\n from_skued = diffread(path)\n self.assertTrue(np.allclose(im, from_skued))\n os.remove(path)", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def run_algo(self, th):\n p = self.run_proc(['threshold', str(th), 'input_0.png',\n 'output.png'])\n self.wait_proc(p, timeout=self.timeout)\n return", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. 
\n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in 
range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, 
vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for 
elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = 
\\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def test_synthetic():\n background = Image.new('RGB', (100, 50), (125, 125, 125))\n red = Image.new('RGB', (10, 5), (255, 0, 0))\n green = Image.new('RGB', (5, 5), (0, 255, 0))\n blue = Image.new('RGB', (20, 5), (0, 0, 255))\n positions = [\n [0, 0],\n [9, 5],\n [99, 20]\n ]\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': positions\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (100, 50))\n assert_equal(synth.getpixel((0, 0)), (255, 0, 0, 255))\n # if there was no overwrite of overlapping patches, this should be:\n # assert_equal(synth.getpixel((9, 5)), (255, 255, 0, 255))\n # but since green is pasted last it is:\n assert_equal(synth.getpixel((9, 5)), (0, 255, 0, 255))", "def test_no_shared_transformations():\n sdata = blobs()\n element_name = \"blobs_image\"\n test_space = \"test\"\n set_transformation(sdata.images[element_name], Identity(), to_coordinate_system=test_space)\n\n gen = sdata._gen_elements()\n for _, name, obj in gen:\n if name != element_name:\n assert test_space not in get_transformation(obj, get_all=True)\n else:\n assert test_space in get_transformation(obj, get_all=True)", "def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def __init__(self, data_dir, mode='train'):\n self.mode = mode\n self.data_dir = data_dir\n if self.mode == 'train':\n self.img_dir = os.path.join(self.data_dir, 'train')\n self.gt_dir = os.path.join(self.data_dir, 'train_gt')\n elif self.mode == 'test':\n self.img_dir = os.path.join(self.data_dir, 'test')\n self.gt_dir = os.path.join(self.data_dir, 'test_gt')\n\n ''' set up list of filenames for retrieval purposes'''\n self.filenames = [image_basename(f) for f in os.listdir(self.img_dir)]\n self.filenames.sort()\n self.gt_names = [image_basename(f) for f in os.listdir(self.gt_dir)]\n self.gt_names.sort()\n\n ''' set up image transform '''\n if self.mode == 'train':\n self.transform = transforms.Compose([\n transforms.Resize((1024, 1024)),\n # transforms.RandomHorizontalFlip(),\n # transforms.CenterCrop((512, 512)),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =0),\n transforms.Resize((1024, 1024)),\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor(),\n ])\n\n elif self.mode == 'test':\n self.transform = transforms.Compose([\n# transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)),\n # /!\\ to remove later\n #transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = 
transforms.Compose([\n # transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)), # /!\\ to remove later\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor()\n ])", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def main():\n # ------------------------\n # 0 SETUP\n # ------------------------\n log.getLogger().setLevel(log.INFO)\n torch.autograd.set_detect_anomaly(True)\n parser = argparse.ArgumentParser(\n description='Image Style Transfer Training')\n parser = pl.Trainer.add_argparse_args(parser)\n # data\n parser.add_argument('-s', '--style-image', default='cropamara', type=str)\n parser.add_argument('-d', '--dataset',\n default=os.path.join('/', 'fridge', 'coco'), type=str)\n parser.add_argument('-t', '--to-style',\n default=os.path.join('images', 'test'), type=str)\n parser.add_argument('-m', '--model', default='transformer', type=str)\n parser.add_argument('-b', '--batch-size', default=1, type=int)\n parser.add_argument('-lr', '--learning-rate', default=0.001, type=float,\n help='initial learning rate')\n parser.add_argument(\"--image-size\", type=int, default=256,\n help=\"size of training images, default is 256\")\n\n parser.add_argument(\"--seed\", type=int, default=4747,\n help=\"random seed for training\")\n parser.add_argument(\"--content-weight\", type=float, default=1e5,\n help=\"weight for content-loss, default is 1e5\")\n parser.add_argument(\"--style-weight\", type=float, default=1e10,\n help=\"weight for style-loss, default is 1e10\")\n parser.add_argument(\"--weights\", type=str, default='flat',\n help=\"weight for layer losses, default is 1 each\")\n\n parser.add_argument(\"--content-image\", type=str, default='./images/content-images/gbimage2.jpeg',\n help=\"path to content image you want to stylize\")\n parser.add_argument(\"--content-scale\", type=float, default=None,\n help=\"factor for scaling down the content image\")\n parser.add_argument(\"--output-dir\", type=str, default='./images/output-images/',\n help=\"path for saving the output images\")\n\n parser.add_argument(\"-cp\", \"--checkpoint\", type=str, default='',\n help=\"path for starting weights\")\n parser.add_argument(\"--single\", action='store_true')\n\n parser.set_defaults(progress_bar_refresh_rate=5,\n gpus='0,1,2',\n max_epochs=50,\n overfit_pct=0.01,\n profiler=True,\n weights_summary='full',\n logger=False,\n distributed_backend=\"dp\")\n args = parser.parse_args()\n\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = FastNeuralStyleSystem(args)\n if args.checkpoint is not '':\n print(f'loading checkpoint: {args.checkpoint}')\n FastNeuralStyleSystem.load_from_checkpoint(args.checkpoint)\n print(model.hparams)\n if args.single:\n print('single image optimize')\n model.to('cuda')\n model.prepare_data()\n model.optimize()\n print('Done single image')\n return\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = pl.Trainer.from_argparse_args(args)\n trainer.checkpoint_callback = pl.callbacks.ModelCheckpoint(\n filepath='./trained_models',\n save_top_k=2,\n verbose=True,\n monitor='train_loss',\n mode='min',\n prefix=args.style_image\n )\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)\n\n import 
glob\n saved_images = glob.glob(\n f\"{args.output_dir}/{args.style_image}_steps_c_{args.content_weight}_s_{args.style_weight}/*png\")\n gif_images = []\n for step_img in saved_images:\n gif_images.append(imageio.imread(step_img))\n imageio.mimsave(os.path.join(temp_dir, '0_optimization.gif'), gif_images)", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n first_order_edge = 
[\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def test_without_target(self, X, y):\n try:\n resize_batch(X, y, 1.0, 'crop', resize_targets=False)\n except:\n pytest.fail('apply_progressive_resizing failed with y == None')", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def evaluate(t, x, y):\n from PIL import Image\n im = Image.open(filename)\n duration = im.info[\"duration\"]*pq.ms if im.info[\"duration\"] is not 0 else 30*pq.ms\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n\n stim = np.zeros([Nt, Ny, Nx])\n t_map = (t.flatten().rescale(\"ms\") / duration).astype(int)\n t_map = t_map[1:] - t_map[:-1]\n for i, ti in enumerate(t_map):\n try:\n im.seek(im.tell()+ti)\n except EOFError:\n break\n frame = im.convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM).resize((Ny, Nx))\n stim[i, :, :] = np.array(frame)\n stim[i, :, :] = 2 * ((stim[i, :, :] - stim[i, :, :].min()) / (stim[i, :, :].max() - stim[i, :, :].min())) - 1\n\n return stim", "def propagateImage(self, dryrun):\n pass", "def process(image):\n pass", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = 
np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def resetTransformations():\n dislin.trfres()", "def imageprepare(argv):\r\n im = Image.open(argv).convert('L')\r\n width = float(im.size[0])\r\n height = float(im.size[1])\r\n newImage = Image.new('L', (28, 28), (255)) #creates white canvas of 28x28 pixels\r\n \r\n if width > height: #check which dimension is bigger\r\n #Width is bigger. Width becomes 20 pixels.\r\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\r\n if (nheight == 0): #rare case but minimum is 1 pixel\r\n nheight = 1 \r\n # resize and sharpen\r\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\r\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\r\n else:\r\n #Height is bigger. Heigth becomes 20 pixels. \r\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\r\n if (nwidth == 0): #rare case but minimum is 1 pixel\r\n nwidth = 1\r\n # resize and sharpen\r\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\r\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\r\n \r\n #newImage.save(\"sample.png\")\r\n\r\n tv = list(newImage.getdata()) #get pixel values\r\n \r\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\r\n tva = [ (255-x)*1.0/255.0 for x in tv] \r\n #print(tva)\r\n return tva", "def demo(image, model_class, do_add_noise=True):\n Log.enable_output = True\n Log.set_log_max_depth(8)\n\n image = normalise(image)\n image = numpy.expand_dims(image, axis=0)\n image = numpy.expand_dims(image, axis=0)\n noisy = add_noise(image) if do_add_noise else image\n print(noisy.shape)\n\n # noisy = models.tensor(noisy)\n image = torch.tensor(image)\n\n model = model_class(\n nb_unet_levels=2,\n spacetime_ndim=2,\n )\n\n print(\"training starts\")\n\n start = time.time()\n n2t_train(noisy, model, nb_epochs=128)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n noisy = torch.tensor(noisy)\n model.eval()\n model = model.cpu()\n print(f\"noisy tensor shape: {noisy.shape}\")\n # in case of batching we have to do this:\n start = time.time()\n denoised = model(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n noisy = noisy.detach().numpy()[0, 0, :, :]\n image = image.detach().numpy()[0, 0, :, :]\n denoised = denoised.detach().numpy()[0, 0, :, :]\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n\n return calculate_print_psnr_ssim(image, noisy, denoised)\n\n # import napari\n #\n # viewer = napari.Viewer() # no prior setup needed\n # viewer.add_image(image, name='image')\n # viewer.add_image(noisy, name='noisy')\n # viewer.add_image(denoised, name='denoised')\n # napari.run()", "def process_image(self):\n pass", "def main() -> None:\n\n # Define file name\n file_name = define_file()\n\n # Open chosen image\n img = image.load_img(IMAGES + file_name, color_mode='grayscale')\n\n # Show user image\n plt.imshow(img)\n plt.show()\n\n # Convert image to array\n img_arr = image.img_to_array(img)\n img_arr = np.array([img_arr])\n img_arr = img_arr.astype(\"float32\") / 255.0\n\n # Classify image\n img_class = classification(img_arr)\n\n # Suggest user add noise to original image\n if img_class == ORIGINAL:\n while True:\n command = input('Seems like your image is original. Do you want to add noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n noisy_array = noise(img_arr)\n display(img_arr, noisy_array)\n img = image.array_to_img(noisy_array[0])\n img.save(IMAGES + file_name[:-4] + '_noise' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Suggest user remove noise from noised image\n elif img_class == NOISED:\n while True:\n command = input('Seems like your image has noise. Do you want to remove noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n denoise_array = denoise_image(img_arr)\n display(img_arr, denoise_array)\n img = image.array_to_img(denoise_array[0])\n img.save(IMAGES + file_name[:-4] + '_denoised' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Image denoised. 
Nothing to do\n else:\n print('Seems like your image denoised.')\n main()", "def applyARUNet(self):\n assert(self.files and len(self.files) > 0)\n\n # TODO: move this to the init method\n session_conf = tf.ConfigProto()\n session_conf.gpu_options.visible_device_list = self.gpu_device\n pred = None\n with tf.Session(graph=self.graph, config=session_conf) as sess:\n x = self.graph.get_tensor_by_name('inImg:0')\n predictor = self.graph.get_tensor_by_name('output:0')\n \n progress = getProgressBar()\n for i in progress(range(len(self.files))):\n# print(self.files[i])\n # img: numpy array (height x width x channels)\n # scipy's misc.imread is deprecated\n # TODO: switch maybe to opencv instead of pillow with its image\n # class overhead\n pil_img = Image.open(self.files[i]).convert('L') # grayscale\n img = np.array(pil_img)\n size = (int(img.shape[1]*self.scale),int(img.shape[0]*self.scale))\n small = np.array(pil_img.resize(size, resample=Image.BICUBIC))\n origsize = (img.shape[1],img.shape[0]) \n\n # TODO: can we actually put them in 1 or 2 batches?\n pred1 = self.runSession(sess, x, predictor, small)\n out = self.pruneBaselines(pred1, size=origsize)\n\n # try other orientation\n # TODO: think of a better check! \n # this one depends on the resolution and due to noise might\n # still pass...\n if self.test_orientation and \\\n np.count_nonzero(out) < 100: \n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n print('rotate it now and try again...')\n # rotate 90 degree counter clock-wise\n small2 = rotate(small, 90, True)\n pred2 = self.runSession(sess, x, predictor, small2) \n origsize = (origsize[1],origsize[0])\n out2 = self.pruneBaselines(pred2, size=origsize)\n # check which direction has higher probability\n # Note: unfortunately the probas are similar high for 0 + 180,\n # as well as 90 and 270 degree, so we cannot test for these\n # orientations!\n # Note 2: raw probability map didnt work out for me, so lets do\n # it that way\n n_comp, _, stats1, _ =\\\n cv2.connectedComponentsWithStats(out.astype(np.uint8))\n n_comp2, _, stats2, _ =\\\n cv2.connectedComponentsWithStats(out2.astype(np.uint8))\n # test for area, assumption is that we get larger\n # mean/median/sum area if it's correctly rotated\n # TODO: might still be a bad test due to noise...\n stat1 = np.sum(stats1[1:,cv2.CC_STAT_AREA])\n stat2 = np.sum(stats2[1:,cv2.CC_STAT_AREA])\n if stat2 > stat1: \n print('rotation by 90 degree counter clockwise gives higher'\n ' probability (orig {} vs rot: {}) for file {}\\n'\n ' -> rotate this file (90 degree'\n ' counter clock-wise), too!'.format(stat1, stat2, self.files[i]))\n out = out2\n \n # small check if we have found a line at all\n if np.count_nonzero(out) < 50: # TODO: think of a better check\n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n\n if self.to_line:\n out = baseToLine(out)\n\n # save it\n name = os.path.splitext(os.path.basename(self.files[i]))[0]\n suffix = self.out_suffix if self.out_suffix else ''\n path = os.path.join(self.outdir, '{}{}.png'.format(name,suffix))\n# print('save to: {}'.format(path))\n out = out * 255\n misc.imsave(path, out)\n \n return pred", "def run_image(image_path, lattice_size=35):\n im = plt.imread(image_path)[:, :, 2]\n im_pixels = _pixels(im)\n\n print('compression ratio is ', lattice_size**2 / float(im.size))\n\n # Hyperparameters.\n num_keypoints = 2\n hparams = tfl.CalibratedRtlHParams(\n num_keypoints=num_keypoints,\n num_lattices=1,\n lattice_rank=2,\n learning_rate=0.003,\n 
lattice_size=lattice_size)\n\n # Estimator.\n # input: coordinate of the pixel\n # output: value of the pixel\n feature_columns = [\n tf.feature_column.numeric_column('pixel_x'),\n tf.feature_column.numeric_column('pixel_y'),\n ]\n\n def keypoints_initializers():\n return tfl.uniform_keypoints_for_signal(\n num_keypoints,\n input_min=0.0,\n input_max=im_pixels.max(),\n output_min=0.0,\n output_max=lattice_size - 1\n )\n rtl_estimator = tfl.calibrated_rtl_regressor(\n feature_columns=feature_columns,\n hparams=hparams,\n keypoints_initializers_fn=keypoints_initializers\n )\n\n # Example input function.\n input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=15,\n shuffle=True)\n\n # Train!\n rtl_estimator.train(input_fn=input_fn)\n\n # Evaluate!\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=1,\n shuffle=True)\n print(rtl_estimator.evaluate(input_fn=eval_input_fn))\n\n return rtl_estimator", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = 
ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def evaluate(t, x, y):\n # TODO: fix normalization\n from PIL import Image\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n stim = np.zeros([Nt, Nx, Ny])\n\n for i, filename in enumerate(filenames):\n im = Image.open(filename).convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM)\n t_start = delay + i * (delay + duration)\n t_stop = (i+1) * (duration + delay)\n stim += np.array(im.resize((Ny, Nx))) * (heaviside(t - t_start) - heaviside(t - t_stop))\n\n if stim.max() - stim.min() != 0:\n stim = 2 * ((stim - stim.min()) / (stim.max() - stim.min())) - 1\n return stim", "def applyMorphologicalCleaning(self, image):", "def __call__(self, images, targets):\n pass", "def main():\n\n # first figure: betas for each predictor\n fig, axes = plt.subplots(figsize=(8, 18), nrows=3)\n\n image_paths = {\n \"3T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20161006_childVSall_depth_1.png\"\n ),\n \"7T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4.png\"\n ),\n \"7T_noise\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4_noise.png\"\n ),\n }\n\n for ax, (_, image_path) in zip(axes, image_paths.items()):\n assert os.path.isfile(image_path)\n img = imread(image_path)\n img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0\n\n ax.imshow(img[200:-100, 50:-50, :])\n ax.axis(\"off\")\n\n savefig(f\"{PATHS['figures']}/figure_3ac.png\")\n plt.close(fig)", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def test_scrubbing_wf_no_insert_na(\n artifact_dir, sample_raw_image, plot_img, request, helpers\n):\n\n test_path = helpers.create_test_dir(artifact_dir, request.node.name)\n scrubbed_path = test_path / \"scrubbed.nii.gz\"\n\n scrub_vector = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0]\n\n wf = build_scrubbing_workflow(\n scrub_vector,\n import_path=sample_raw_image,\n insert_na=False,\n export_path=scrubbed_path,\n base_dir=test_path,\n crashdump_dir=test_path,\n )\n\n wf.write_graph(dotfilename=test_path / \"scrubbed_flow\", graph2use=\"colored\")\n\n wf.run()\n\n helpers.plot_timeseries(scrubbed_path, sample_raw_image)\n\n if plot_img:\n helpers.plot_4D_img_slice(scrubbed_path, \"scrubbed.png\")", "def testDetect(name = \"smokey.gif\", amount = 20):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n image2 = detectEdges(image, amount)\n image2.draw()", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 
1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n \"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def skeletonize(data,subscriber = 0):\n nx,ny=data.shape\n #zero padding\n image = zeros((nx+2,ny+2),'int16')\n image[:,:] = IP.BACKGROUND_COLOR\n image[1:-1,1:-1]=data\n\n erosionComplete = False\n runs = 0\n erosionComplete = False\n runs = 0\n isCorner = 
zeros((nx+2,ny+2),'bool')\n while not erosionComplete:\n ruleI = (image == IP.FEATURE_COLOR)\n XFeat, YFeat = ruleI.nonzero()\n numberFeatures = len(XFeat)\n erosedPixels = 0\n if runs == 0:\n progressbar = progress(numberFeatures)\n neighbourhood = zeros((nx+2,ny+2,3),'int16')\n for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n ruleII = neighbourhood[:,:,1]>=1\n ruleIII = neighbourhood[:,:,0]> 1\n border = (ruleI & ruleII & ruleIII)\n #ruleIV and ruleV\n XBord, YBord = border.nonzero()\n XBord2 = []\n YBord2 = []\n for x,y in zip(XBord.tolist(),YBord.tolist()):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n else:\n XBord2.append(x)\n YBord2.append(y)\n for x,y in zip(XBord2,YBord2):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n if erosedPixels == 0:\n erosionComplete = True\n subscriber %= 100.\n else:\n xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n if neighbourhood[x,y,2] == 1:\n isCorner[x+1,y-1] = True\n elif neighbourhood[x,y,2] == 2:\n isCorner[x+1,y+1] = True\n elif neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def testPosterize(name = \"smokey.gif\", triple = (0,0,0)):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n posterize(image, triple)\n image.draw()", "def transform(self, previousimage):", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, 
oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def train(args):\n # Create the data loader\n loader = sunnerData.DataLoader(\n dataset = sunnerData.ImageDataset(\n root = [[args.train]],\n transforms = transforms.Compose([\n \n# transforms.RandomCrop(720,720)\n# transforms.RandomRotation(45)\n# transforms.RandomHorizontalFlip(), \n# transforms.ColorJitter(brightness=0.5, contrast=0.5),\n \n\n sunnerTransforms.Resize(output_size = (args.H, args.W)),\n #transforms.RandomCrop(512,512)\n sunnerTransforms.ToTensor(),\n sunnerTransforms.ToFloat(),\n # sunnerTransforms.Transpose(),\n sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n ), batch_size = args.batch_size, shuffle = True, num_workers = 2\n )\n loader = sunnerData.IterationLoader(loader, max_iter = args.n_iter)\n\n # Create the model\n model = GANomaly2D(r = args.r, device = args.device)\n model.IO(args.resume, direction = 'load')\n model.train()\n \n # Train!\n bar = tqdm(loader)\n for i, (normal_img,) in enumerate(bar):\n model.forward(normal_img)\n model.backward()\n loss_G, loss_D = model.getLoss()\n bar.set_description(\"Loss_G: \" + str(loss_G) + \" loss_D: \" + str(loss_D))\n bar.refresh()\n if i % args.record_iter == 0:\n model.eval()\n with torch.no_grad():\n z, z_ = model.forward(normal_img)\n img, img_ = model.getImg()\n visualizeEncoderDecoder(img, img_, z, z_,i)\n model.train()\n model.IO(args.det, direction = 'save')\n model.IO(args.det, direction = 'save')", "def tessellate(self):\n\n self.tessellation = Delaunay(self.grid)", "def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = 
os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1", "def test_time_optimize(args, model, optim, imgs, poses, hwf, bound):\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(args.tto_steps):\n indices = torch.randint(num_rays, size=[args.tto_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = 
getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def run_predictive(op) -> None:\n\n try:\n img = Image.open(op['input'])\n except Exception as e:\n print(e)\n sys.exit(1)\n\n algo.predictive.run(op)", "def test_replace_image(self):\n pass", "def test_resize_noop(self, X, y, mode):\n Xc, _ = resize_batch(X, y, 1.0, mode, resize_targets=False)\n assert X is Xc", "def run(self, image):\n # width, height = image.size\n # resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n # target_size = (int(resize_ratio * width), int(resize_ratio * height))\n target_size = (self.INPUT_SIZE, self.INPUT_SIZE)\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n net_image = resized_image\n if params.HZ_preprocess_activate:\n net_image = params.image_preprocess_func(resized_image)\n net_image = np.expand_dims(net_image, axis=-1)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(net_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map", "def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n 
self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)", "def test_transformer2d_single_step_e2e(self):\n\n problem_object = allen_brain.Img2imgAllenBrainDim8to32()\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n\n with TemporaryDirectory() as data_dir:\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n input_xy_dim = problem_object.input_dim\n target_xy_dim = problem_object.output_dim\n num_channels = problem_object.num_channels\n\n hparams = image_transformer_2d.img2img_transformer2d_tiny()\n hparams.data_dir = data_dir\n\n p_hparams = problem_object.get_hparams(hparams)\n\n model = image_transformer_2d.Img2imgTransformer(\n hparams, tf.estimator.ModeKeys.TRAIN, p_hparams\n )\n\n @tfe.implicit_value_and_gradients\n def loss_fn(features):\n _, losses = model(features)\n return losses[\"training\"]\n\n batch_size = 1\n train_dataset = problem_object.dataset(Modes.TRAIN, data_dir)\n train_dataset = train_dataset.repeat(None).batch(batch_size)\n\n optimizer = tf.train.AdamOptimizer()\n\n example = tfe.Iterator(train_dataset).next()\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [batch_size,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n _, gv = loss_fn(example)\n optimizer.apply_gradients(gv)\n\n model.set_mode(Modes.EVAL)\n dataset = problem_object.dataset(Modes.EVAL, data_dir)\n\n example = tfe.Iterator(dataset).next()\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [1,\n input_xy_dim,\n input_xy_dim,\n num_channels])\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [1,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n\n predictions, _ = model(example)\n\n self.assertEqual(predictions.numpy().shape,\n (1,\n target_xy_dim,\n target_xy_dim,\n num_channels,\n 256))", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n 
pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def imageprepare(argv):\n im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def test_synthetic_auto():\n background = Image.new('RGB', (7, 3), (125, 125, 125))\n red = Image.new('RGB', (1, 1), (255, 0, 0))\n green = Image.new('RGB', (1, 1), (0, 255, 0))\n blue = Image.new('RGB', (1, 1), (0, 0, 255))\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': 'auto'\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (7, 3))\n assert_equal(synth.getpixel((1, 1)), (255, 0, 0, 255))\n assert_equal(synth.getpixel((3, 1)), (0, 255, 0, 255))\n assert_equal(synth.getpixel((5, 1)), (0, 0, 255, 255))", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def getLoader(s_image_dir,c_image_dir, \n style_selected_dir, content_selected_dir,\n crop_size=178, batch_size=16, num_workers=8, \n colorJitterEnable=True, colorConfig={\"brightness\":0.05,\"contrast\":0.05,\"saturation\":0.05,\"hue\":0.05}):\n s_transforms = []\n c_transforms = []\n \n s_transforms.append(StyleResize())\n # s_transforms.append(T.Resize(900))\n c_transforms.append(T.Resize(900))\n\n s_transforms.append(T.RandomCrop(crop_size,pad_if_needed=True,padding_mode='reflect'))\n c_transforms.append(T.RandomCrop(crop_size))\n\n s_transforms.append(T.RandomHorizontalFlip())\n c_transforms.append(T.RandomHorizontalFlip())\n \n s_transforms.append(T.RandomVerticalFlip())\n c_transforms.append(T.RandomVerticalFlip())\n\n if colorJitterEnable:\n if colorConfig is not None:\n print(\"Enable color jitter!\")\n colorBrightness = colorConfig[\"brightness\"]\n colorContrast = colorConfig[\"contrast\"]\n colorSaturation = colorConfig[\"saturation\"]\n colorHue = (-colorConfig[\"hue\"],colorConfig[\"hue\"])\n s_transforms.append(T.ColorJitter(brightness=colorBrightness,\\\n contrast=colorContrast,saturation=colorSaturation, hue=colorHue))\n c_transforms.append(T.ColorJitter(brightness=colorBrightness,\\\n contrast=colorContrast,saturation=colorSaturation, hue=colorHue))\n s_transforms.append(T.ToTensor())\n c_transforms.append(T.ToTensor())\n\n s_transforms.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n c_transforms.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\n \n s_transforms = T.Compose(s_transforms)\n c_transforms = T.Compose(c_transforms)\n\n content_dataset = TotalDataset(c_image_dir,s_image_dir, content_selected_dir, style_selected_dir\n , c_transforms,s_transforms)\n content_data_loader = data.DataLoader(dataset=content_dataset,batch_size=batch_size,\n drop_last=True,shuffle=True,num_workers=num_workers,pin_memory=True)\n prefetcher = data_prefetcher(content_data_loader)\n return prefetcher" ]
[ "0.6173433", "0.5833571", "0.5788354", "0.5786499", "0.56434995", "0.56299114", "0.56189775", "0.5616973", "0.5609669", "0.5601143", "0.5585079", "0.55399793", "0.549523", "0.5494799", "0.5486404", "0.5449044", "0.5434429", "0.543015", "0.5419234", "0.53946966", "0.5386449", "0.5381422", "0.53632486", "0.53564316", "0.53476274", "0.5335628", "0.53297967", "0.5321238", "0.5311802", "0.52949303", "0.529455", "0.5284156", "0.5277508", "0.52709275", "0.52701014", "0.52701014", "0.52563655", "0.52562475", "0.52546114", "0.52511233", "0.52479535", "0.52377003", "0.5237434", "0.5233856", "0.5228259", "0.5227636", "0.5225944", "0.5225101", "0.5223803", "0.52177083", "0.52143836", "0.5213979", "0.5204428", "0.52022403", "0.5195938", "0.51950336", "0.5186388", "0.5174006", "0.5163454", "0.51633215", "0.5161844", "0.51612103", "0.5161151", "0.5160143", "0.5155749", "0.5155259", "0.5154269", "0.51508945", "0.51494414", "0.51484156", "0.514462", "0.51442206", "0.51433957", "0.5142639", "0.513768", "0.5129706", "0.5125053", "0.51195484", "0.5111757", "0.5105749", "0.5103291", "0.509953", "0.5095181", "0.50950617", "0.5086164", "0.508548", "0.5084705", "0.5081266", "0.50790405", "0.5078905", "0.5077679", "0.5077362", "0.50768393", "0.507496", "0.50713354", "0.5070153", "0.5058217", "0.50580275", "0.5053292", "0.5049513", "0.50482327" ]
0.0
-1
run the tessellation on an empty image
def test_on_map_of_sinus(synthetic_checkerboard):
    img = synthetic_checkerboard['img']
    di = synthetic_checkerboard['sindi']

    cpp_vorimg = tess.tessellate_labimg(img,di)
    py_vorimg = pytess.tessellate_labimg(img,di)

    assert np.alltrue(py_vorimg[:4,:4] == 1)
    printers.store_ndarray("py_voronoi_on_map_of_sinus_output.txt",py_vorimg)

    assert cpp_vorimg.size > 0
    assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape
    assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1)

    printers.store_ndarray("cpp_voronoi_input.txt",img)
    printers.store_ndarray("cpp_voronoi_on_map_of_sinus_output.txt",cpp_vorimg)
    assert np.alltrue(cpp_vorimg[:4,:4] == 1)
    assert np.alltrue(cpp_vorimg == py_vorimg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_stuff(self):\n self.create_tourism_raster()", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def final_plain():\n\n\tconfig = Config()\n\tconfig.layer1_size = 256\n\tconfig.num_channels = 15\n\tconfig.target_channels = 3\n\tconfig.target_loss = 0.01\n\tconfig.lifetime = 32\n\tconfig.size = 32\n\tconfig.initial_state = 'sconf_center_black_dot'\n\tconfig.edge_strategy = 'EdgeStrategy.TF_SAME'\n\tconfig.growing_jump = 0\n\n\tfor path in glob.glob(\"images/final/*.png\"):\n\t\timg_name = os.path.basename(path)\n\t\tconfig.target_state = f'sconf_image(\"final/{img_name}\")'\n\t\tbuild_and_train(\"final_compare_gradual\", config)", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def draw_T(self):\n for i in range(self.n):\n for j in range(self.m):\n t = self.T[i, j]\n if t != 0 and self.V[i, j] == 1:\n if len(self.images) > 0:\n self.draw_img(i, j, t)\n else:\n self.draw_text(i, j, str(t), BLACK)", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n 
cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def unpropagateImage(self, dryrun):\n pass", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()", "def process(self, image):", "def setUp(self):\n test_file_1 = path.join(\n path.dirname(datasets.__file__), \"twod_image_1.npy\"\n )\n\n original_image = np.load(test_file_1)\n\n # get a single tile from the image to test\n # note this image is currently unpadded.\n # how many boundary elements are needed to pad?\n extracted_image = original_image[0:32, 0:32]\n\n self.img = np.expand_dims(extracted_image, axis=-1)\n\n # Don't make this too huge for brevity.\n self.J = 3\n # 0 = no overlap etc.\n self.overlap_log_2 = 0\n # apply to all available orders\n self.order = 3\n # Should be one or more to avoid aliasing, if you want overlapping\n # tiles this can increase too.\n self.oversampling = 1\n\n self.num_angles = 3\n self.angles = tuple(\n [\n 90.0\n - np.rad2deg(\n (int(self.num_angles - self.num_angles / 2 - 1) - theta)\n * np.pi\n / self.num_angles\n )\n for theta in range(self.num_angles)\n ]\n )\n\n # details of the input data\n self.sample_rate = 0.004 * 3\n\n # vanilla filter bank\n wavelets = [\n vanilla_morlet_2d(self.sample_rate, j=i) for i in range(0, self.J)\n ]\n father_wavelet = vanilla_gabor_2d(self.sample_rate, j=self.J)\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1 = wavelets[0]\n wav2 = wavelets[1]\n wav3 = wavelets[2]\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1_k = wav1.kernel(self.angles[0])\n wav2_k = wav2.kernel(self.angles[1])\n wav3_k = wav3.kernel(self.angles[2])\n\n phi = father_wavelet.kernel(0.0)\n\n npad = 31\n img_pad = np.pad(\n self.img, ((npad, npad), (npad, npad), (0, 0)), mode=\"reflect\"\n )\n # get numpy array of the test input image\n x = img_pad[:, :, 0]\n\n # manual convolution, |x * psi_1|\n conv = np.abs(convolve2d(x, wav1_k, mode=\"same\"))\n conv2 = np.abs(convolve2d(conv, wav2_k, mode=\"same\"))\n conv3 = np.abs(convolve2d(conv2, wav3_k, mode=\"same\"))\n\n # unpad the original image, and convolve with the phi\n # note that the dimensions for phi are one less than the\n # conv 
result, so we get a 4x4 result. Take the first one\n self.manual_result1 = convolve2d(\n conv[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result2 = convolve2d(\n conv2[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result3 = convolve2d(\n conv3[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]", "def execute(self, image):\n undist = self.undistort(image)\n result = self.threshold_image(undist, self.thresholds['ksize'],\n self.thresholds['sobel'],\n self.thresholds['magnitude'],\n self.thresholds['direction'],\n self.thresholds['saturation'],\n self.thresholds['lightness'],\n self.thresholds['blue-yellow'])\n warped = self.warp(result)\n if self.args.is_test:\n self.image_logger.save_image(warped, 'warped_image.png')\n ploty, left_fit, right_fit, left_fitx, right_fitx = self.get_line_fit(warped)\n left_rad, right_rad = measure_curvature(warped, left_fitx, right_fitx, self.args.is_test)\n self.left_line.update(left_fit, left_rad)\n self.right_line.update(right_fit, right_rad)\n result = self.draw_final_image(image, warped, undist, ploty, left_fitx, right_fitx, self.Minv,\n self.left_line.best_curvature,\n self.right_line.best_curvature)\n return result", "def retarget_image(img, T, C, r, c):\n row, col = img.shape[:2]\n seam_path = optimal_path(T, C, r, c)\n img_final = img\n for i in seam_path:\n if i == 0:\n img_final, _ = seam_removal_horizontal(img_final)\n else:\n img_final, _ = seam_removal_vertical(img_final, [])\n return img_final", "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n img_path = osp.join(self.img_prefix, img_info['filename'])\n\n if self.proposals is not None:\n proposal = self.proposals[idx][:self.num_max_proposals]\n if not proposal.shape[1] == 4 or proposal.shape[1] == 5:\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposal.shape))\n else:\n proposal = None\n\n if self.with_background_erasing:\n ann = self.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n else:\n gt_bboxes = None\n\n def prepare_single_scale(img_path, expected_size, flip_ratio=0,\n proposal=None, bbox=None):\n _img, img_shape, pad_shape, scale_factor, \\\n flipped_flag, flipped_direction = self.img_transforms(\n img_path, expected_size, flip_ratio=flip_ratio)\n if bbox is not None:\n if not len(bbox) == 0:\n _gt_bboxes = self.bbox_transforms(bbox,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n else:\n _gt_bboxes = bbox\n _img = self.background_erasing(\n _img, img_shape, _gt_bboxes,\n cell_size=self.be_cell_size,\n random_ratio=self.be_random_ratio)\n _img = to_tensor(_img)\n _img_meta = dict(\n filename=img_info['filename'],\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flipped_flag=flipped_flag,\n flipped_direction=flipped_direction\n )\n if proposal is not None:\n if proposal.shape[1] == 5:\n score = proposal[:, 4, None]\n proposal = proposal[:, :4]\n else:\n score = None\n _proposal = self.bbox_transforms(proposal,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n _proposal = np.hstack([_proposal, score]) \\\n if score is not None else _proposal\n _proposal = to_tensor(_proposal)\n else:\n _proposal = None\n return _img, _img_meta, _proposal\n\n imgs = []\n img_metas = []\n proposals = []\n for expected_size in self.img_expected_sizes:\n # at first, we do not flip the image\n _img, _img_meta, _proposal = prepare_single_scale(\n 
img_path, expected_size, flip_ratio=0,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n if self.flip_ratio > 0:\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=1,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n data = dict(img=imgs, img_meta=img_metas)\n if self.proposals is not None:\n data['proposals'] = proposals\n return data", "def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def run_frame(self, ti, img):\n pass", "def reset_image_estimate(self):\n # reset_shared_var(self.t_A)\n self.t_A.set_value(self.t_QUAD_REG_MEAN.get_value())\n reset_shared_var(self.t_Ap)", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + 
image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = 
ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def test_on_tiff(self):\n im = np.random.randint(0, 127, size=(512, 512))\n path = Path(\".\\\\test_tif.tif\")\n\n # Annoying low contrast warning\n with suppress_warnings():\n imsave(str(path), im)\n\n from_skued = diffread(path)\n self.assertTrue(np.allclose(im, from_skued))\n os.remove(path)", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def run_algo(self, th):\n p = self.run_proc(['threshold', str(th), 'input_0.png',\n 'output.png'])\n self.wait_proc(p, timeout=self.timeout)\n return", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. 
\n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in 
range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, 
vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for 
elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = 
\\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def test_no_shared_transformations():\n sdata = blobs()\n element_name = \"blobs_image\"\n test_space = \"test\"\n set_transformation(sdata.images[element_name], Identity(), to_coordinate_system=test_space)\n\n gen = sdata._gen_elements()\n for _, name, obj in gen:\n if name != element_name:\n assert test_space not in get_transformation(obj, get_all=True)\n else:\n assert test_space in get_transformation(obj, get_all=True)", "def test_synthetic():\n background = Image.new('RGB', (100, 50), (125, 125, 125))\n red = Image.new('RGB', (10, 5), (255, 0, 0))\n green = Image.new('RGB', (5, 5), (0, 255, 0))\n blue = Image.new('RGB', (20, 5), (0, 0, 255))\n positions = [\n [0, 0],\n [9, 5],\n [99, 20]\n ]\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': positions\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (100, 50))\n assert_equal(synth.getpixel((0, 0)), (255, 0, 0, 255))\n # if there was no overwrite of overlapping patches, this should be:\n # assert_equal(synth.getpixel((9, 5)), (255, 255, 0, 255))\n # but since green is pasted last it is:\n assert_equal(synth.getpixel((9, 5)), (0, 255, 0, 255))", "def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def __init__(self, data_dir, mode='train'):\n self.mode = mode\n self.data_dir = data_dir\n if self.mode == 'train':\n self.img_dir = os.path.join(self.data_dir, 'train')\n self.gt_dir = os.path.join(self.data_dir, 'train_gt')\n elif self.mode == 'test':\n self.img_dir = os.path.join(self.data_dir, 'test')\n self.gt_dir = os.path.join(self.data_dir, 'test_gt')\n\n ''' set up list of filenames for retrieval purposes'''\n self.filenames = [image_basename(f) for f in os.listdir(self.img_dir)]\n self.filenames.sort()\n self.gt_names = [image_basename(f) for f in os.listdir(self.gt_dir)]\n self.gt_names.sort()\n\n ''' set up image transform '''\n if self.mode == 'train':\n self.transform = transforms.Compose([\n transforms.Resize((1024, 1024)),\n # transforms.RandomHorizontalFlip(),\n # transforms.CenterCrop((512, 512)),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =0),\n transforms.Resize((1024, 1024)),\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor(),\n ])\n\n elif self.mode == 'test':\n self.transform = transforms.Compose([\n# transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)),\n # /!\\ to remove later\n #transforms.RandomHorizontalFlip(),\n 
transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)), # /!\\ to remove later\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor()\n ])", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def main():\n # ------------------------\n # 0 SETUP\n # ------------------------\n log.getLogger().setLevel(log.INFO)\n torch.autograd.set_detect_anomaly(True)\n parser = argparse.ArgumentParser(\n description='Image Style Transfer Training')\n parser = pl.Trainer.add_argparse_args(parser)\n # data\n parser.add_argument('-s', '--style-image', default='cropamara', type=str)\n parser.add_argument('-d', '--dataset',\n default=os.path.join('/', 'fridge', 'coco'), type=str)\n parser.add_argument('-t', '--to-style',\n default=os.path.join('images', 'test'), type=str)\n parser.add_argument('-m', '--model', default='transformer', type=str)\n parser.add_argument('-b', '--batch-size', default=1, type=int)\n parser.add_argument('-lr', '--learning-rate', default=0.001, type=float,\n help='initial learning rate')\n parser.add_argument(\"--image-size\", type=int, default=256,\n help=\"size of training images, default is 256\")\n\n parser.add_argument(\"--seed\", type=int, default=4747,\n help=\"random seed for training\")\n parser.add_argument(\"--content-weight\", type=float, default=1e5,\n help=\"weight for content-loss, default is 1e5\")\n parser.add_argument(\"--style-weight\", type=float, default=1e10,\n help=\"weight for style-loss, default is 1e10\")\n parser.add_argument(\"--weights\", type=str, default='flat',\n help=\"weight for layer losses, default is 1 each\")\n\n parser.add_argument(\"--content-image\", type=str, default='./images/content-images/gbimage2.jpeg',\n help=\"path to content image you want to stylize\")\n parser.add_argument(\"--content-scale\", type=float, default=None,\n help=\"factor for scaling down the content image\")\n parser.add_argument(\"--output-dir\", type=str, default='./images/output-images/',\n help=\"path for saving the output images\")\n\n parser.add_argument(\"-cp\", \"--checkpoint\", type=str, default='',\n help=\"path for starting weights\")\n parser.add_argument(\"--single\", action='store_true')\n\n parser.set_defaults(progress_bar_refresh_rate=5,\n gpus='0,1,2',\n max_epochs=50,\n overfit_pct=0.01,\n profiler=True,\n weights_summary='full',\n logger=False,\n distributed_backend=\"dp\")\n args = parser.parse_args()\n\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = FastNeuralStyleSystem(args)\n if args.checkpoint is not '':\n print(f'loading checkpoint: {args.checkpoint}')\n FastNeuralStyleSystem.load_from_checkpoint(args.checkpoint)\n print(model.hparams)\n if args.single:\n print('single image optimize')\n model.to('cuda')\n model.prepare_data()\n model.optimize()\n print('Done single image')\n return\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = pl.Trainer.from_argparse_args(args)\n trainer.checkpoint_callback = pl.callbacks.ModelCheckpoint(\n 
filepath='./trained_models',\n save_top_k=2,\n verbose=True,\n monitor='train_loss',\n mode='min',\n prefix=args.style_image\n )\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)\n\n import glob\n saved_images = glob.glob(\n f\"{args.output_dir}/{args.style_image}_steps_c_{args.content_weight}_s_{args.style_weight}/*png\")\n gif_images = []\n for step_img in saved_images:\n gif_images.append(imageio.imread(step_img))\n imageio.mimsave(os.path.join(temp_dir, '0_optimization.gif'), gif_images)", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n 
first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def test_without_target(self, X, y):\n try:\n resize_batch(X, y, 1.0, 'crop', resize_targets=False)\n except:\n pytest.fail('apply_progressive_resizing failed with y == None')", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def propagateImage(self, dryrun):\n pass", "def process(image):\n pass", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def evaluate(t, x, y):\n from PIL import Image\n im = Image.open(filename)\n duration = im.info[\"duration\"]*pq.ms if im.info[\"duration\"] is not 0 else 30*pq.ms\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n\n stim = np.zeros([Nt, Ny, Nx])\n t_map = (t.flatten().rescale(\"ms\") / duration).astype(int)\n t_map = t_map[1:] - t_map[:-1]\n for i, ti in enumerate(t_map):\n try:\n im.seek(im.tell()+ti)\n except EOFError:\n break\n frame = im.convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM).resize((Ny, Nx))\n stim[i, :, :] = np.array(frame)\n stim[i, :, :] = 2 * ((stim[i, :, :] - stim[i, :, :].min()) / (stim[i, :, :].max() - stim[i, :, :].min())) - 1\n\n return stim", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = 
np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def resetTransformations():\n dislin.trfres()", "def process_image(self):\n pass", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def main() -> None:\n\n # Define file name\n file_name = define_file()\n\n # Open chosen image\n img = image.load_img(IMAGES + file_name, color_mode='grayscale')\n\n # Show user image\n plt.imshow(img)\n plt.show()\n\n # Convert image to array\n img_arr = image.img_to_array(img)\n img_arr = np.array([img_arr])\n img_arr = img_arr.astype(\"float32\") / 255.0\n\n # Classify image\n img_class = classification(img_arr)\n\n # Suggest user add noise to original image\n if img_class == ORIGINAL:\n while True:\n command = input('Seems like your image is original. Do you want to add noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n noisy_array = noise(img_arr)\n display(img_arr, noisy_array)\n img = image.array_to_img(noisy_array[0])\n img.save(IMAGES + file_name[:-4] + '_noise' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Suggest user remove noise from noised image\n elif img_class == NOISED:\n while True:\n command = input('Seems like your image has noise. Do you want to remove noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n denoise_array = denoise_image(img_arr)\n display(img_arr, denoise_array)\n img = image.array_to_img(denoise_array[0])\n img.save(IMAGES + file_name[:-4] + '_denoised' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Image denoised. Nothing to do\n else:\n print('Seems like your image denoised.')\n main()", "def imageprepare(argv):\r\n im = Image.open(argv).convert('L')\r\n width = float(im.size[0])\r\n height = float(im.size[1])\r\n newImage = Image.new('L', (28, 28), (255)) #creates white canvas of 28x28 pixels\r\n \r\n if width > height: #check which dimension is bigger\r\n #Width is bigger. 
Width becomes 20 pixels.\r\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\r\n if (nheight == 0): #rare case but minimum is 1 pixel\r\n nheight = 1 \r\n # resize and sharpen\r\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\r\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\r\n else:\r\n #Height is bigger. Heigth becomes 20 pixels. \r\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\r\n if (nwidth == 0): #rare case but minimum is 1 pixel\r\n nwidth = 1\r\n # resize and sharpen\r\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\r\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\r\n \r\n #newImage.save(\"sample.png\")\r\n\r\n tv = list(newImage.getdata()) #get pixel values\r\n \r\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [ (255-x)*1.0/255.0 for x in tv] \r\n #print(tva)\r\n return tva", "def demo(image, model_class, do_add_noise=True):\n Log.enable_output = True\n Log.set_log_max_depth(8)\n\n image = normalise(image)\n image = numpy.expand_dims(image, axis=0)\n image = numpy.expand_dims(image, axis=0)\n noisy = add_noise(image) if do_add_noise else image\n print(noisy.shape)\n\n # noisy = models.tensor(noisy)\n image = torch.tensor(image)\n\n model = model_class(\n nb_unet_levels=2,\n spacetime_ndim=2,\n )\n\n print(\"training starts\")\n\n start = time.time()\n n2t_train(noisy, model, nb_epochs=128)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n noisy = torch.tensor(noisy)\n model.eval()\n model = model.cpu()\n print(f\"noisy tensor shape: {noisy.shape}\")\n # in case of batching we have to do this:\n start = time.time()\n denoised = model(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n noisy = noisy.detach().numpy()[0, 0, :, :]\n image = image.detach().numpy()[0, 0, :, :]\n denoised = denoised.detach().numpy()[0, 0, :, :]\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n\n return calculate_print_psnr_ssim(image, noisy, denoised)\n\n # import napari\n #\n # viewer = napari.Viewer() # no prior setup needed\n # viewer.add_image(image, name='image')\n # viewer.add_image(noisy, name='noisy')\n # viewer.add_image(denoised, name='denoised')\n # napari.run()", "def applyARUNet(self):\n assert(self.files and len(self.files) > 0)\n\n # TODO: move this to the init method\n session_conf = tf.ConfigProto()\n session_conf.gpu_options.visible_device_list = self.gpu_device\n pred = None\n with tf.Session(graph=self.graph, config=session_conf) as sess:\n x = self.graph.get_tensor_by_name('inImg:0')\n predictor = self.graph.get_tensor_by_name('output:0')\n \n progress = getProgressBar()\n for i in progress(range(len(self.files))):\n# print(self.files[i])\n # img: numpy array (height x width x channels)\n # scipy's misc.imread is deprecated\n # TODO: switch maybe to opencv instead of pillow with its image\n # class overhead\n pil_img = Image.open(self.files[i]).convert('L') # grayscale\n img = np.array(pil_img)\n size = (int(img.shape[1]*self.scale),int(img.shape[0]*self.scale))\n small = np.array(pil_img.resize(size, resample=Image.BICUBIC))\n origsize = (img.shape[1],img.shape[0]) \n\n # TODO: can we actually put them in 1 or 2 
batches?\n pred1 = self.runSession(sess, x, predictor, small)\n out = self.pruneBaselines(pred1, size=origsize)\n\n # try other orientation\n # TODO: think of a better check! \n # this one depends on the resolution and due to noise might\n # still pass...\n if self.test_orientation and \\\n np.count_nonzero(out) < 100: \n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n print('rotate it now and try again...')\n # rotate 90 degree counter clock-wise\n small2 = rotate(small, 90, True)\n pred2 = self.runSession(sess, x, predictor, small2) \n origsize = (origsize[1],origsize[0])\n out2 = self.pruneBaselines(pred2, size=origsize)\n # check which direction has higher probability\n # Note: unfortunately the probas are similar high for 0 + 180,\n # as well as 90 and 270 degree, so we cannot test for these\n # orientations!\n # Note 2: raw probability map didnt work out for me, so lets do\n # it that way\n n_comp, _, stats1, _ =\\\n cv2.connectedComponentsWithStats(out.astype(np.uint8))\n n_comp2, _, stats2, _ =\\\n cv2.connectedComponentsWithStats(out2.astype(np.uint8))\n # test for area, assumption is that we get larger\n # mean/median/sum area if it's correctly rotated\n # TODO: might still be a bad test due to noise...\n stat1 = np.sum(stats1[1:,cv2.CC_STAT_AREA])\n stat2 = np.sum(stats2[1:,cv2.CC_STAT_AREA])\n if stat2 > stat1: \n print('rotation by 90 degree counter clockwise gives higher'\n ' probability (orig {} vs rot: {}) for file {}\\n'\n ' -> rotate this file (90 degree'\n ' counter clock-wise), too!'.format(stat1, stat2, self.files[i]))\n out = out2\n \n # small check if we have found a line at all\n if np.count_nonzero(out) < 50: # TODO: think of a better check\n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n\n if self.to_line:\n out = baseToLine(out)\n\n # save it\n name = os.path.splitext(os.path.basename(self.files[i]))[0]\n suffix = self.out_suffix if self.out_suffix else ''\n path = os.path.join(self.outdir, '{}{}.png'.format(name,suffix))\n# print('save to: {}'.format(path))\n out = out * 255\n misc.imsave(path, out)\n \n return pred", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, 
n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def run_image(image_path, lattice_size=35):\n im = plt.imread(image_path)[:, :, 2]\n im_pixels = _pixels(im)\n\n print('compression ratio is ', lattice_size**2 / float(im.size))\n\n # Hyperparameters.\n num_keypoints = 2\n hparams = tfl.CalibratedRtlHParams(\n num_keypoints=num_keypoints,\n num_lattices=1,\n lattice_rank=2,\n learning_rate=0.003,\n lattice_size=lattice_size)\n\n # Estimator.\n # input: coordinate of the pixel\n # output: value of the pixel\n feature_columns = [\n tf.feature_column.numeric_column('pixel_x'),\n tf.feature_column.numeric_column('pixel_y'),\n ]\n\n def keypoints_initializers():\n return tfl.uniform_keypoints_for_signal(\n num_keypoints,\n input_min=0.0,\n input_max=im_pixels.max(),\n output_min=0.0,\n output_max=lattice_size - 1\n )\n rtl_estimator = tfl.calibrated_rtl_regressor(\n feature_columns=feature_columns,\n hparams=hparams,\n keypoints_initializers_fn=keypoints_initializers\n )\n\n # Example input function.\n input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=15,\n shuffle=True)\n\n # Train!\n rtl_estimator.train(input_fn=input_fn)\n\n # Evaluate!\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=1,\n shuffle=True)\n print(rtl_estimator.evaluate(input_fn=eval_input_fn))\n\n return rtl_estimator", "def applyMorphologicalCleaning(self, image):", "def __call__(self, images, targets):\n pass", "def test_scrubbing_wf_no_insert_na(\n artifact_dir, sample_raw_image, plot_img, request, helpers\n):\n\n test_path = helpers.create_test_dir(artifact_dir, request.node.name)\n scrubbed_path = test_path / \"scrubbed.nii.gz\"\n\n scrub_vector = [0, 1, 
0, 0, 0, 0, 1, 0, 0, 0]\n\n wf = build_scrubbing_workflow(\n scrub_vector,\n import_path=sample_raw_image,\n insert_na=False,\n export_path=scrubbed_path,\n base_dir=test_path,\n crashdump_dir=test_path,\n )\n\n wf.write_graph(dotfilename=test_path / \"scrubbed_flow\", graph2use=\"colored\")\n\n wf.run()\n\n helpers.plot_timeseries(scrubbed_path, sample_raw_image)\n\n if plot_img:\n helpers.plot_4D_img_slice(scrubbed_path, \"scrubbed.png\")", "def evaluate(t, x, y):\n # TODO: fix normalization\n from PIL import Image\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n stim = np.zeros([Nt, Nx, Ny])\n\n for i, filename in enumerate(filenames):\n im = Image.open(filename).convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM)\n t_start = delay + i * (delay + duration)\n t_stop = (i+1) * (duration + delay)\n stim += np.array(im.resize((Ny, Nx))) * (heaviside(t - t_start) - heaviside(t - t_stop))\n\n if stim.max() - stim.min() != 0:\n stim = 2 * ((stim - stim.min()) / (stim.max() - stim.min())) - 1\n return stim", "def main():\n\n # first figure: betas for each predictor\n fig, axes = plt.subplots(figsize=(8, 18), nrows=3)\n\n image_paths = {\n \"3T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20161006_childVSall_depth_1.png\"\n ),\n \"7T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4.png\"\n ),\n \"7T_noise\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4_noise.png\"\n ),\n }\n\n for ax, (_, image_path) in zip(axes, image_paths.items()):\n assert os.path.isfile(image_path)\n img = imread(image_path)\n img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0\n\n ax.imshow(img[200:-100, 50:-50, :])\n ax.axis(\"off\")\n\n savefig(f\"{PATHS['figures']}/figure_3ac.png\")\n plt.close(fig)", "def testDetect(name = \"smokey.gif\", amount = 20):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n image2 = detectEdges(image, amount)\n image2.draw()", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n \"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration 
result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def skeletonize(data,subscriber = 0):\n nx,ny=data.shape\n #zero padding\n image = zeros((nx+2,ny+2),'int16')\n image[:,:] = IP.BACKGROUND_COLOR\n image[1:-1,1:-1]=data\n\n erosionComplete = False\n runs = 0\n erosionComplete = False\n runs = 0\n isCorner = zeros((nx+2,ny+2),'bool')\n while not erosionComplete:\n ruleI = (image == IP.FEATURE_COLOR)\n XFeat, YFeat = ruleI.nonzero()\n numberFeatures = len(XFeat)\n erosedPixels = 0\n if runs == 0:\n progressbar = progress(numberFeatures)\n neighbourhood = zeros((nx+2,ny+2,3),'int16')\n for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n ruleII = neighbourhood[:,:,1]>=1\n ruleIII = neighbourhood[:,:,0]> 1\n border = (ruleI & ruleII & ruleIII)\n #ruleIV and ruleV\n XBord, YBord = border.nonzero()\n XBord2 = []\n YBord2 = []\n for x,y in zip(XBord.tolist(),YBord.tolist()):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n else:\n XBord2.append(x)\n YBord2.append(y)\n for x,y in zip(XBord2,YBord2):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= 
progressbar.step()\n if erosedPixels == 0:\n erosionComplete = True\n subscriber %= 100.\n else:\n xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n if neighbourhood[x,y,2] == 1:\n isCorner[x+1,y-1] = True\n elif neighbourhood[x,y,2] == 2:\n isCorner[x+1,y+1] = True\n elif neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def testPosterize(name = \"smokey.gif\", triple = (0,0,0)):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n posterize(image, triple)\n image.draw()", "def transform(self, previousimage):", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def train(args):\n # Create the data loader\n loader = sunnerData.DataLoader(\n dataset = sunnerData.ImageDataset(\n root = [[args.train]],\n transforms = transforms.Compose([\n \n# transforms.RandomCrop(720,720)\n# transforms.RandomRotation(45)\n# transforms.RandomHorizontalFlip(), \n# transforms.ColorJitter(brightness=0.5, contrast=0.5),\n \n\n sunnerTransforms.Resize(output_size = (args.H, args.W)),\n #transforms.RandomCrop(512,512)\n sunnerTransforms.ToTensor(),\n sunnerTransforms.ToFloat(),\n # sunnerTransforms.Transpose(),\n sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n ), batch_size = args.batch_size, shuffle = True, num_workers = 2\n )\n loader = sunnerData.IterationLoader(loader, max_iter = args.n_iter)\n\n # Create the model\n model = GANomaly2D(r = args.r, device = 
args.device)\n model.IO(args.resume, direction = 'load')\n model.train()\n \n # Train!\n bar = tqdm(loader)\n for i, (normal_img,) in enumerate(bar):\n model.forward(normal_img)\n model.backward()\n loss_G, loss_D = model.getLoss()\n bar.set_description(\"Loss_G: \" + str(loss_G) + \" loss_D: \" + str(loss_D))\n bar.refresh()\n if i % args.record_iter == 0:\n model.eval()\n with torch.no_grad():\n z, z_ = model.forward(normal_img)\n img, img_ = model.getImg()\n visualizeEncoderDecoder(img, img_, z, z_,i)\n model.train()\n model.IO(args.det, direction = 'save')\n model.IO(args.det, direction = 'save')", "def tessellate(self):\n\n self.tessellation = Delaunay(self.grid)", "def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = 
correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n 
out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def test_time_optimize(args, model, optim, imgs, poses, hwf, bound):\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(args.tto_steps):\n indices = torch.randint(num_rays, size=[args.tto_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()", "def test_replace_image(self):\n pass", "def test_resize_noop(self, X, y, mode):\n Xc, _ = resize_batch(X, y, 1.0, mode, resize_targets=False)\n assert X is Xc", "def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)", "def run_predictive(op) -> None:\n\n try:\n img = Image.open(op['input'])\n except Exception as e:\n print(e)\n sys.exit(1)\n\n algo.predictive.run(op)", "def run(self, image):\n # width, height = image.size\n # resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n # target_size = (int(resize_ratio * width), int(resize_ratio * height))\n target_size = (self.INPUT_SIZE, self.INPUT_SIZE)\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n net_image = resized_image\n if params.HZ_preprocess_activate:\n net_image = params.image_preprocess_func(resized_image)\n net_image = np.expand_dims(net_image, axis=-1)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(net_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map", "def test_transformer2d_single_step_e2e(self):\n\n problem_object = allen_brain.Img2imgAllenBrainDim8to32()\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n\n with TemporaryDirectory() as data_dir:\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n 
input_xy_dim = problem_object.input_dim\n target_xy_dim = problem_object.output_dim\n num_channels = problem_object.num_channels\n\n hparams = image_transformer_2d.img2img_transformer2d_tiny()\n hparams.data_dir = data_dir\n\n p_hparams = problem_object.get_hparams(hparams)\n\n model = image_transformer_2d.Img2imgTransformer(\n hparams, tf.estimator.ModeKeys.TRAIN, p_hparams\n )\n\n @tfe.implicit_value_and_gradients\n def loss_fn(features):\n _, losses = model(features)\n return losses[\"training\"]\n\n batch_size = 1\n train_dataset = problem_object.dataset(Modes.TRAIN, data_dir)\n train_dataset = train_dataset.repeat(None).batch(batch_size)\n\n optimizer = tf.train.AdamOptimizer()\n\n example = tfe.Iterator(train_dataset).next()\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [batch_size,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n _, gv = loss_fn(example)\n optimizer.apply_gradients(gv)\n\n model.set_mode(Modes.EVAL)\n dataset = problem_object.dataset(Modes.EVAL, data_dir)\n\n example = tfe.Iterator(dataset).next()\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [1,\n input_xy_dim,\n input_xy_dim,\n num_channels])\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [1,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n\n predictions, _ = model(example)\n\n self.assertEqual(predictions.numpy().shape,\n (1,\n target_xy_dim,\n target_xy_dim,\n num_channels,\n 256))", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def imageprepare(argv):\n 
im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def test_synthetic_auto():\n background = Image.new('RGB', (7, 3), (125, 125, 125))\n red = Image.new('RGB', (1, 1), (255, 0, 0))\n green = Image.new('RGB', (1, 1), (0, 255, 0))\n blue = Image.new('RGB', (1, 1), (0, 0, 255))\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': 'auto'\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (7, 3))\n assert_equal(synth.getpixel((1, 1)), (255, 0, 0, 255))\n assert_equal(synth.getpixel((3, 1)), (0, 255, 0, 255))\n assert_equal(synth.getpixel((5, 1)), (0, 0, 255, 255))", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")" ]
[ "0.6175", "0.5833953", "0.5790842", "0.5783519", "0.56402856", "0.5631712", "0.56218964", "0.56209666", "0.56089985", "0.5602803", "0.5586337", "0.55410194", "0.54952884", "0.54952645", "0.5484749", "0.545018", "0.54336417", "0.54323924", "0.5420694", "0.5396064", "0.53868186", "0.53826827", "0.5362292", "0.53536016", "0.5348376", "0.5334613", "0.5331135", "0.5322052", "0.53130174", "0.52966183", "0.52957296", "0.52858585", "0.52761126", "0.52730453", "0.52730453", "0.52709746", "0.52577615", "0.5255402", "0.5254579", "0.5252006", "0.52486026", "0.52388537", "0.5236693", "0.52361727", "0.52310973", "0.5229211", "0.5226386", "0.52245146", "0.5223437", "0.52189183", "0.52161115", "0.52122986", "0.5206287", "0.5203338", "0.5197991", "0.51952636", "0.518684", "0.51737463", "0.51647913", "0.5163664", "0.5163374", "0.51623523", "0.5162209", "0.51612526", "0.51582986", "0.51550347", "0.5154782", "0.51534045", "0.5149202", "0.5147345", "0.5146975", "0.5145934", "0.5144491", "0.51382864", "0.51379997", "0.51306033", "0.51246226", "0.5121956", "0.5112787", "0.5103682", "0.5103279", "0.5099865", "0.5096792", "0.5092306", "0.50870067", "0.50856745", "0.5083787", "0.5081803", "0.5081272", "0.5081258", "0.50808245", "0.5080545", "0.5077416", "0.5073753", "0.5072539", "0.5069696", "0.5058945", "0.5058356", "0.5054575", "0.5049315", "0.5047406" ]
0.0
-1
run the tessellation on an empty image
def test_on_map_of_noise(synthetic_checkerboard): img = synthetic_checkerboard['img'] di = synthetic_checkerboard['distimg'] cpp_vorimg = tess.tessellate_labimg(img,di) py_vorimg = pytess.tessellate_labimg(img,di) printers.store_ndarray("py_voronoi_on_map_of_noise_output.txt",py_vorimg) assert cpp_vorimg.size > 0 assert cpp_vorimg.shape == synthetic_checkerboard['img'].shape assert np.alltrue(synthetic_checkerboard['img'][1:3,1:3] == 1) printers.store_ndarray("cpp_voronoi_input.txt",img) printers.store_ndarray("cpp_voronoi_on_map_of_noise_output.txt",cpp_vorimg) # assert np.alltrue(cpp_vorimg[:4,:4] == 1) assert np.alltrue(cpp_vorimg == py_vorimg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_stuff(self):\n self.create_tourism_raster()", "def write_stitched_image(self):\r\n\r\n self.write_debug(\"End of train detected. Writing stitched image.\")\r\n cv2.imwrite(os.path.join(self.output_dir_stitched, 'stitched.jpg'), self.stitched_image)", "def final_plain():\n\n\tconfig = Config()\n\tconfig.layer1_size = 256\n\tconfig.num_channels = 15\n\tconfig.target_channels = 3\n\tconfig.target_loss = 0.01\n\tconfig.lifetime = 32\n\tconfig.size = 32\n\tconfig.initial_state = 'sconf_center_black_dot'\n\tconfig.edge_strategy = 'EdgeStrategy.TF_SAME'\n\tconfig.growing_jump = 0\n\n\tfor path in glob.glob(\"images/final/*.png\"):\n\t\timg_name = os.path.basename(path)\n\t\tconfig.target_state = f'sconf_image(\"final/{img_name}\")'\n\t\tbuild_and_train(\"final_compare_gradual\", config)", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def draw_T(self):\n for i in range(self.n):\n for j in range(self.m):\n t = self.T[i, j]\n if t != 0 and self.V[i, j] == 1:\n if len(self.images) > 0:\n self.draw_img(i, j, t)\n else:\n self.draw_text(i, j, str(t), BLACK)", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n 
cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def unpropagateImage(self, dryrun):\n pass", "def main():\n test_image = load_image()\n\n pixelate_image(\n normalize_image(test_image)\n )\n pass", "def exercise():\n\n #\n # Convert Lena Tiff image to raw format\n #\n for f in glob.glob('*.jpg'):\n os.remove(f)\n \n for f in glob.glob('*.dat'):\n os.remove(f)\n \n input_raw_file = convert_to_raw('Lena.tiff')\n\n for device in ['cpu', 'gpu']:\n for interp in ['nn', 'bl']:\n for (w,h) in ((256, 300), (486, 486),(2000, 1000),(1000, 2000),(8000, 4000)):\n (t, f) = interpolate(input_raw_file, device + '_' + interp + '_lena.dat', device, 0, interp, w, h)\n convert_to_jpg(f)\n\n \n for f in glob.glob('*.dat'):\n convert_to_jpg(f)\n os.remove(f)\n \n quit()", "def process(self, image):", "def setUp(self):\n test_file_1 = path.join(\n path.dirname(datasets.__file__), \"twod_image_1.npy\"\n )\n\n original_image = np.load(test_file_1)\n\n # get a single tile from the image to test\n # note this image is currently unpadded.\n # how many boundary elements are needed to pad?\n extracted_image = original_image[0:32, 0:32]\n\n self.img = np.expand_dims(extracted_image, axis=-1)\n\n # Don't make this too huge for brevity.\n self.J = 3\n # 0 = no overlap etc.\n self.overlap_log_2 = 0\n # apply to all available orders\n self.order = 3\n # Should be one or more to avoid aliasing, if you want overlapping\n # tiles this can increase too.\n self.oversampling = 1\n\n self.num_angles = 3\n self.angles = tuple(\n [\n 90.0\n - np.rad2deg(\n (int(self.num_angles - self.num_angles / 2 - 1) - theta)\n * np.pi\n / self.num_angles\n )\n for theta in range(self.num_angles)\n ]\n )\n\n # details of the input data\n self.sample_rate = 0.004 * 3\n\n # vanilla filter bank\n wavelets = [\n vanilla_morlet_2d(self.sample_rate, j=i) for i in range(0, self.J)\n ]\n father_wavelet = vanilla_gabor_2d(self.sample_rate, j=self.J)\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1 = wavelets[0]\n wav2 = wavelets[1]\n wav3 = wavelets[2]\n\n # extract the kernels of each of the wavelets for manual convolution\n # we'll test using three different angles that we used to create the\n # transform above.\n wav1_k = wav1.kernel(self.angles[0])\n wav2_k = wav2.kernel(self.angles[1])\n wav3_k = wav3.kernel(self.angles[2])\n\n phi = father_wavelet.kernel(0.0)\n\n npad = 31\n img_pad = np.pad(\n self.img, ((npad, npad), (npad, npad), (0, 0)), mode=\"reflect\"\n )\n # get numpy array of the test input image\n x = img_pad[:, :, 0]\n\n # manual convolution, |x * psi_1|\n conv = np.abs(convolve2d(x, wav1_k, mode=\"same\"))\n conv2 = np.abs(convolve2d(conv, wav2_k, mode=\"same\"))\n conv3 = np.abs(convolve2d(conv2, wav3_k, mode=\"same\"))\n\n # unpad the original image, and convolve with the phi\n # note that the dimensions for phi are one less than the\n # conv 
result, so we get a 4x4 result. Take the first one\n self.manual_result1 = convolve2d(\n conv[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result2 = convolve2d(\n conv2[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]\n self.manual_result3 = convolve2d(\n conv3[npad:-npad, npad:-npad], phi.real, mode=\"valid\"\n )[0, 0]", "def execute(self, image):\n undist = self.undistort(image)\n result = self.threshold_image(undist, self.thresholds['ksize'],\n self.thresholds['sobel'],\n self.thresholds['magnitude'],\n self.thresholds['direction'],\n self.thresholds['saturation'],\n self.thresholds['lightness'],\n self.thresholds['blue-yellow'])\n warped = self.warp(result)\n if self.args.is_test:\n self.image_logger.save_image(warped, 'warped_image.png')\n ploty, left_fit, right_fit, left_fitx, right_fitx = self.get_line_fit(warped)\n left_rad, right_rad = measure_curvature(warped, left_fitx, right_fitx, self.args.is_test)\n self.left_line.update(left_fit, left_rad)\n self.right_line.update(right_fit, right_rad)\n result = self.draw_final_image(image, warped, undist, ploty, left_fitx, right_fitx, self.Minv,\n self.left_line.best_curvature,\n self.right_line.best_curvature)\n return result", "def retarget_image(img, T, C, r, c):\n row, col = img.shape[:2]\n seam_path = optimal_path(T, C, r, c)\n img_final = img\n for i in seam_path:\n if i == 0:\n img_final, _ = seam_removal_horizontal(img_final)\n else:\n img_final, _ = seam_removal_vertical(img_final, [])\n return img_final", "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n img_path = osp.join(self.img_prefix, img_info['filename'])\n\n if self.proposals is not None:\n proposal = self.proposals[idx][:self.num_max_proposals]\n if not proposal.shape[1] == 4 or proposal.shape[1] == 5:\n raise AssertionError(\n 'proposals should have shapes (n, 4) or (n, 5), '\n 'but found {}'.format(proposal.shape))\n else:\n proposal = None\n\n if self.with_background_erasing:\n ann = self.get_ann_info(idx)\n gt_bboxes = ann['bboxes']\n else:\n gt_bboxes = None\n\n def prepare_single_scale(img_path, expected_size, flip_ratio=0,\n proposal=None, bbox=None):\n _img, img_shape, pad_shape, scale_factor, \\\n flipped_flag, flipped_direction = self.img_transforms(\n img_path, expected_size, flip_ratio=flip_ratio)\n if bbox is not None:\n if not len(bbox) == 0:\n _gt_bboxes = self.bbox_transforms(bbox,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n else:\n _gt_bboxes = bbox\n _img = self.background_erasing(\n _img, img_shape, _gt_bboxes,\n cell_size=self.be_cell_size,\n random_ratio=self.be_random_ratio)\n _img = to_tensor(_img)\n _img_meta = dict(\n filename=img_info['filename'],\n ori_shape=(img_info['height'], img_info['width'], 3),\n img_shape=img_shape,\n pad_shape=pad_shape,\n scale_factor=scale_factor,\n flipped_flag=flipped_flag,\n flipped_direction=flipped_direction\n )\n if proposal is not None:\n if proposal.shape[1] == 5:\n score = proposal[:, 4, None]\n proposal = proposal[:, :4]\n else:\n score = None\n _proposal = self.bbox_transforms(proposal,\n img_shape,\n scale_factor,\n flipped_flag,\n flipped_direction)\n _proposal = np.hstack([_proposal, score]) \\\n if score is not None else _proposal\n _proposal = to_tensor(_proposal)\n else:\n _proposal = None\n return _img, _img_meta, _proposal\n\n imgs = []\n img_metas = []\n proposals = []\n for expected_size in self.img_expected_sizes:\n # at first, we do not flip the image\n _img, _img_meta, _proposal = prepare_single_scale(\n 
img_path, expected_size, flip_ratio=0,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n if self.flip_ratio > 0:\n _img, _img_meta, _proposal = prepare_single_scale(\n img_path, expected_size, flip_ratio=1,\n proposal=proposal, bbox=gt_bboxes)\n imgs.append(_img)\n img_metas.append(DataContainer(_img_meta, cpu_only=True))\n proposals.append(_proposal)\n data = dict(img=imgs, img_meta=img_metas)\n if self.proposals is not None:\n data['proposals'] = proposals\n return data", "def __update_tesseract__(self):\n if self.row_bitmaps != []:\n self.__write_out_row__()\n cv2.imwrite(\"active_weather.basic.exp\" + str(self.box_count) + \".tiff\", self.training_page)\n # call([\"convert\", \"-density 300\", \"-depth 4\", \"active_weather.basic.exp0.tiff\",\"active_weather.basic.exp0.tiff\"])\n call([\"/usr/bin/tesseract\", \"active_weather.basic.exp0.tiff\", \"active_weather.basic.exp0\", \"nobatch\", \"box.train\"])\n\n with open(\"font_properties\",\"w\") as f:\n f.write(\"basic 0 0 0 0 0\\n\")\n\n call([\"unicharset_extractor\", \"active_weather.basic.exp0.box\"])\n os.system(\"/home/ggdhines/github/tesseract/training/set_unicharset_properties -F font_properties -U unicharset -O unicharset --script_dir=/home/ggdhines/langdata\")\n # os.system(\"shapeclustering -F font_properties -U unicharset active_weather.basic.exp0.tr\")\n # os.system(\"shapeclustering -F font_properties active_weather.basic.exp0.tr\")\n os.system(\"mftraining -F font_properties -U unicharset -O active_weather.unicharset active_weather.basic.exp0.tr\")\n os.system(\"cntraining active_weather.basic.exp0.tr\")\n\n os.system(\"mv inttemp active_weather.inttemp\")\n os.system(\"mv normproto active_weather.normproto\")\n os.system(\"mv pffmtable active_weather.pffmtable\")\n os.system(\"mv shapetable active_weather.shapetable\")\n os.system(\"combine_tessdata active_weather.\")\n\n os.system(\"mv active_weather.basic.* /tmp/tessdata/\")\n os.system(\"mv active_weather.inttemp /tmp/tessdata/\")\n os.system(\"mv active_weather.normproto /tmp/tessdata/\")\n os.system(\"mv active_weather.pffmtable /tmp/tessdata/\")\n os.system(\"mv active_weather.shapetable /tmp/tessdata/\")\n os.system(\"mv active_weather.traineddata /tmp/tessdata/\")\n os.system(\"mv active_weather.unicharset /tmp/tessdata/\")\n os.system(\"mv font_properties /tmp/tessdata/\")", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def run_frame(self, ti, img):\n pass", "def reset_image_estimate(self):\n # reset_shared_var(self.t_A)\n self.t_A.set_value(self.t_QUAD_REG_MEAN.get_value())\n reset_shared_var(self.t_Ap)", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + 
image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def blackout_images(image,ticlass):\n rgb = ocropy.intarray()\n ticlass.textImageProbabilities(rgb,image)\n r = ocropy.bytearray()\n g = ocropy.bytearray()\n b = 
ocropy.bytearray()\n ocropy.unpack_rgb(r,g,b,rgb)\n components = ocropy.intarray()\n components.copy(g)\n n = ocropy.label_components(components)\n print \"[note] number of image regions\",n\n tirects = ocropy.rectarray()\n ocropy.bounding_boxes(tirects,components)\n for i in range(1,tirects.length()):\n r = tirects.at(i)\n ocropy.fill_rect(image,r,0)\n r.pad_by(-5,-5)\n ocropy.fill_rect(image,r,255)", "def imageprepare():\r\n file_name = '9-test.png'\r\n im = Image.open(file_name).convert('L')\r\n\r\n im.save(\"9-t.png\")\r\n plt.imshow(im)\r\n plt.show()\r\n tv = list(im.getdata())\r\n\r\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\r\n return tva", "def test_on_tiff(self):\n im = np.random.randint(0, 127, size=(512, 512))\n path = Path(\".\\\\test_tif.tif\")\n\n # Annoying low contrast warning\n with suppress_warnings():\n imsave(str(path), im)\n\n from_skued = diffread(path)\n self.assertTrue(np.allclose(im, from_skued))\n os.remove(path)", "def main(image_path):\n temp_dir = tempfile.mkdtemp()\n print('Saving output to {}'.format(temp_dir))\n estimator = run_image(image_path)\n visualize(estimator, image_path, temp_dir)", "def run_algo(self, th):\n p = self.run_proc(['threshold', str(th), 'input_0.png',\n 'output.png'])\n self.wait_proc(p, timeout=self.timeout)\n return", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. 
\n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in 
range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, 
vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = [],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for 
elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. 
\n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + 
str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def test_full(args, model, device): \n test_path = '../data/full/original/'\n generate_path = '../data/full/generate/'\n test_image_num = len([name for name in os.listdir(test_path)\n if os.path.isfile(os.path.join(test_path, name))])\n\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0\n ind = 0\n for name in os.listdir(test_path):\n if os.path.isfile(os.path.join(test_path, name)):\n ind += 1\n test_original, test_style, image_height, image_width = load_test_dataset(name)\n x = torch.from_numpy(test_original).float()\n y_real = torch.from_numpy(test_style).float()\n x = x.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n y_real = y_real.view(image_height, image_width, config.channels).permute(2, 0, 1).to(device)\n \n y_fake = model.gen_g(x.view(-1, config.channels, image_height, image_width))\n y_fake = y_fake.view(config.channels, image_height, image_width)\n \n # Calculate PSNR & SSIM scores\n score_psnr += psnr_full(y_fake, y_real)\n \n y_fake_np = y_fake.detach().cpu().numpy().transpose(1, 2, 0)\n y_real_np = y_real.cpu().numpy().transpose(1, 2, 0)\n temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True, gaussian_weights=True, full=True)\n score_ssim_skimage += temp_ssim\n \n temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n score_ssim_minstar += temp_ssim\n \n score_msssim_minstar += multi_scale_ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)\n print('PSNR & SSIM scores of {} images are calculated.'.format(ind))\n \n utils.save_image(y_fake, os.path.join(generate_path, '{}-x.jpg'.format(name[:5] + args.model_type)))\n\n score_psnr /= test_image_num\n score_ssim_skimage /= test_image_num\n score_ssim_minstar /= test_image_num\n score_msssim_minstar /= test_image_num\n print('PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'.format(\n score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar))", "def science_reduction(input_file):\n #name of the planet\n planet = input_file['exoplanet']\n #set original directory\n original_path = os.getcwd()\n save_path = input_file['save_path']\n data_path = input_file['data_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n exoplanet = glob.glob(planet+'*.fits')\n print '\\nLoading exoplanet images \\nTotal of '+planet+'*.fits files = ',len(exoplanet),'\\nFiles = 
\\n'\n print exoplanet\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n print '\\nCopy science images to save_path directory to main reduction: ....'\n os.system('cp '+planet+'*.fits '+save_path)\n print '\\n .... done. \\n'\n #change to save_path\n os.chdir(save_path)\n #create the names for exoplanet science mages with bias subtracted\n bexoplanet = []\n for i in exoplanet:\n bexoplanet.append('B'+i)\n #verify if previous superbias exist\n if os.path.isfile('B'+i) == True:\n os.system('rm B'+i)\n print '\\n Will be create this images: \\n'\n print bexoplanet\n #exoplanet = string.join(exoplanet,',') #create the list string of exoplanet science images\n #bexoplanet = string.join(bexoplanet,',')#create the list string of bexoplanet science images\n print '\\nSubtracting superbias.fits from all '+planet+'*.fits images ....\\n'\n for i in range(len(exoplanet)):\n iraf.imarith(exoplanet[i],'-','superbias.fits',bexoplanet[i])\n use.update_progress((i+1.)/len(bexoplanet))\n print '\\n.... cleaning '+planet+'*.fits images\\n'\n os.system('rm '+planet+'*.fits')\n print '\\n Statistics of B'+planet+'*.fits images: \\n'\n for i in range(len(bexoplanet)):\n iraf.imstat(bexoplanet[i])\n print '\\nFlatfielding the B'+planet+'*.fits ....\\n'\n #create the names for exoplanet science images with bias subtracted and flatfielding\n abexoplanet = []\n for i in bexoplanet:\n abexoplanet.append('A'+i)\n #verify if previous superbias exist\n if os.path.isfile('A'+i) == True:\n os.system('rm A'+i)\n print '\\n Will be create this images: \\n'\n print abexoplanet\n #flatifielding images\n for i in range(len(abexoplanet)):\n iraf.imarith(bexoplanet[i],'/','superflat.fits',abexoplanet[i])\n use.update_progress((i+1.)/len(abexoplanet))\n # print '\\n.... 
cleaning B'+planet+'*.fits images\\n'\n # os.system('rm B'+planet+'*.fits')\n print '\\n Statistics of AB'+planet+'*.fits images: \\n'\n for i in range(len(abexoplanet)):\n iraf.imstat(abexoplanet[i])\n os.chdir(original_path) #change to save_path\n return", "def test_no_shared_transformations():\n sdata = blobs()\n element_name = \"blobs_image\"\n test_space = \"test\"\n set_transformation(sdata.images[element_name], Identity(), to_coordinate_system=test_space)\n\n gen = sdata._gen_elements()\n for _, name, obj in gen:\n if name != element_name:\n assert test_space not in get_transformation(obj, get_all=True)\n else:\n assert test_space in get_transformation(obj, get_all=True)", "def test_synthetic():\n background = Image.new('RGB', (100, 50), (125, 125, 125))\n red = Image.new('RGB', (10, 5), (255, 0, 0))\n green = Image.new('RGB', (5, 5), (0, 255, 0))\n blue = Image.new('RGB', (20, 5), (0, 0, 255))\n positions = [\n [0, 0],\n [9, 5],\n [99, 20]\n ]\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': positions\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (100, 50))\n assert_equal(synth.getpixel((0, 0)), (255, 0, 0, 255))\n # if there was no overwrite of overlapping patches, this should be:\n # assert_equal(synth.getpixel((9, 5)), (255, 255, 0, 255))\n # but since green is pasted last it is:\n assert_equal(synth.getpixel((9, 5)), (0, 255, 0, 255))", "def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")", "def process_tir_image(ds, data_res, t_thresh=-50, min_mcs_size=5000):\n ctt = (ds['tb']).squeeze()-273.15\n min_pix_nb = min_mcs_size / data_res**2\n\n max_pix_nb = 300000 / data_res**2 # this is to capture satellite artefacts that come in large contiguous stripes.\n labels, goodinds = mcs_define(ctt.values, t_thresh, minmax_area=[min_pix_nb, max_pix_nb]) # 7.7x7.7km = 64km2 per pix in gridsat? 
83 pix is 5000km2\n dic = dictionary()\n #plt.figure()\n #plt.pcolormesh(labels)\n #plt.colorbar()\n #plt.show()\n for g in goodinds:\n\n if g==0:\n continue\n\n pos = np.where(labels==g)\n npos = np.where(labels!=g)\n datestr = str(int(ctt['time.year'].values))+'-'+str(int(ctt['time.month'].values)).zfill(2)+'-'+str(int(ctt['time.day'].values)).zfill(2)+'_'+\\\n str(int(ctt['time.hour'].values)).zfill(2)+':'+str(int(ctt['time.minute'].values)).zfill(2)\n \n dic['date'].append(datestr)\n dic['month'].append(int(ctt['time.month']))\n dic['hour'].append(int(ctt['time.hour']))\n dic['year'].append(int(ctt['time.year']))\n dic['day'].append(int(ctt['time.day']))\n dic['minute'].append(int(ctt['time.minute']))\n\n storm = ctt.copy()\n storm.values[npos] = np.nan\n tmin_pos = np.nanargmin(storm.values)\n tpos_2d = np.unravel_index(tmin_pos, storm.shape)\n \n latmin = np.nanmin(ctt.lat.values[pos[0]])\n latmax = np.nanmax(ctt.lat.values[pos[0]])\n lonmin = np.nanmin(ctt.lon.values[pos[1]])\n lonmax = np.nanmax(ctt.lon.values[pos[1]])\n dic['area'].append(np.sum(np.isfinite(storm.values))*data_res**2)\n dic['70area'].append(np.sum(storm.values<=-70)*data_res**2)\n dic['minlon'].append(lonmin)\n dic['minlat'].append(latmin)\n dic['maxlon'].append(lonmax)\n dic['maxlat'].append(latmax)\n dic['clon'].append(lonmin + (lonmax - lonmin)/2)\n dic['clat'].append(latmin + (latmax - latmin)/2)\n dic['tmin'].append(np.nanmin(storm))\n dic['tminlat'].append(float(ctt.lat[tpos_2d[0]].values))\n dic['tminlon'].append(float(ctt.lon[tpos_2d[1]].values))\n dic['tmean'].append(float(np.nanmean(storm)))\n dic['tp1'].append(float(np.nanpercentile(storm, 1)))\n dic['tp99'].append(float(np.nanpercentile(storm, 99)))\n dic['stormID'].append(datestr + '_' + str(g))\n dic['cloudMask'].append(labels==g)\n dic['tir'].append(storm.values)\n\n # for k in dic.keys():\n # print(k, len(dic[k]))\n return dic", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def testTrivial(self):\n self.doTest(afwGeom.makeIdentityTransform())", "def __init__(self, data_dir, mode='train'):\n self.mode = mode\n self.data_dir = data_dir\n if self.mode == 'train':\n self.img_dir = os.path.join(self.data_dir, 'train')\n self.gt_dir = os.path.join(self.data_dir, 'train_gt')\n elif self.mode == 'test':\n self.img_dir = os.path.join(self.data_dir, 'test')\n self.gt_dir = os.path.join(self.data_dir, 'test_gt')\n\n ''' set up list of filenames for retrieval purposes'''\n self.filenames = [image_basename(f) for f in os.listdir(self.img_dir)]\n self.filenames.sort()\n self.gt_names = [image_basename(f) for f in os.listdir(self.gt_dir)]\n self.gt_names.sort()\n\n ''' set up image transform '''\n if self.mode == 'train':\n self.transform = transforms.Compose([\n transforms.Resize((1024, 1024)),\n # transforms.RandomHorizontalFlip(),\n # transforms.CenterCrop((512, 512)),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =0),\n transforms.Resize((1024, 1024)),\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor(),\n ])\n\n elif self.mode == 'test':\n self.transform = transforms.Compose([\n# transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)),\n # /!\\ to remove later\n #transforms.RandomHorizontalFlip(),\n 
transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n # transforms.Normalize(MEAN, STD)\n ])\n self.mask_transform = transforms.Compose([\n # transforms.Pad(padding =50, fill =1),\n transforms.Resize((1024, 1024)), # /!\\ to remove later\n \n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), # (H,W,C)->(C,H,W), [0,255]->[0, 1.0] RGB->RGB\n ])\n\n self.gt_transform = transforms.Compose([\n transforms.Resize((1024,1024)),\n transforms.ToTensor()\n ])", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def main():\n # ------------------------\n # 0 SETUP\n # ------------------------\n log.getLogger().setLevel(log.INFO)\n torch.autograd.set_detect_anomaly(True)\n parser = argparse.ArgumentParser(\n description='Image Style Transfer Training')\n parser = pl.Trainer.add_argparse_args(parser)\n # data\n parser.add_argument('-s', '--style-image', default='cropamara', type=str)\n parser.add_argument('-d', '--dataset',\n default=os.path.join('/', 'fridge', 'coco'), type=str)\n parser.add_argument('-t', '--to-style',\n default=os.path.join('images', 'test'), type=str)\n parser.add_argument('-m', '--model', default='transformer', type=str)\n parser.add_argument('-b', '--batch-size', default=1, type=int)\n parser.add_argument('-lr', '--learning-rate', default=0.001, type=float,\n help='initial learning rate')\n parser.add_argument(\"--image-size\", type=int, default=256,\n help=\"size of training images, default is 256\")\n\n parser.add_argument(\"--seed\", type=int, default=4747,\n help=\"random seed for training\")\n parser.add_argument(\"--content-weight\", type=float, default=1e5,\n help=\"weight for content-loss, default is 1e5\")\n parser.add_argument(\"--style-weight\", type=float, default=1e10,\n help=\"weight for style-loss, default is 1e10\")\n parser.add_argument(\"--weights\", type=str, default='flat',\n help=\"weight for layer losses, default is 1 each\")\n\n parser.add_argument(\"--content-image\", type=str, default='./images/content-images/gbimage2.jpeg',\n help=\"path to content image you want to stylize\")\n parser.add_argument(\"--content-scale\", type=float, default=None,\n help=\"factor for scaling down the content image\")\n parser.add_argument(\"--output-dir\", type=str, default='./images/output-images/',\n help=\"path for saving the output images\")\n\n parser.add_argument(\"-cp\", \"--checkpoint\", type=str, default='',\n help=\"path for starting weights\")\n parser.add_argument(\"--single\", action='store_true')\n\n parser.set_defaults(progress_bar_refresh_rate=5,\n gpus='0,1,2',\n max_epochs=50,\n overfit_pct=0.01,\n profiler=True,\n weights_summary='full',\n logger=False,\n distributed_backend=\"dp\")\n args = parser.parse_args()\n\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = FastNeuralStyleSystem(args)\n if args.checkpoint is not '':\n print(f'loading checkpoint: {args.checkpoint}')\n FastNeuralStyleSystem.load_from_checkpoint(args.checkpoint)\n print(model.hparams)\n if args.single:\n print('single image optimize')\n model.to('cuda')\n model.prepare_data()\n model.optimize()\n print('Done single image')\n return\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = pl.Trainer.from_argparse_args(args)\n trainer.checkpoint_callback = pl.callbacks.ModelCheckpoint(\n 
filepath='./trained_models',\n save_top_k=2,\n verbose=True,\n monitor='train_loss',\n mode='min',\n prefix=args.style_image\n )\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)\n\n import glob\n saved_images = glob.glob(\n f\"{args.output_dir}/{args.style_image}_steps_c_{args.content_weight}_s_{args.style_weight}/*png\")\n gif_images = []\n for step_img in saved_images:\n gif_images.append(imageio.imread(step_img))\n imageio.mimsave(os.path.join(temp_dir, '0_optimization.gif'), gif_images)", "def detect(img, template):\r\n\r\n #detect threshold\r\n args = parse_args()\r\n threshold=dictornary(args)\r\n\r\n # detect edges of image\r\n \"\"\"prewitt_x = [[1, 0, -1]] * 3\r\n prewitt_y = [[1] * 3, [0] * 3, [-1] * 3]\r\n img_x = task1.detect_edges(img, prewitt_x, False)\r\n img_y = task1.detect_edges(img, prewitt_y, False)\r\n img_norm = task1.edge_magnitude(img_x, img_y)\r\n\r\n task1.write_image(task1.normalize(img_norm), \".//img_norm.jpg\")\r\n\r\n # detect edges in template\r\n\r\n temp_x = task1.detect_edges(template, prewitt_x, False)\r\n temp_y = task1.detect_edges(template, prewitt_y, False)\r\n template_norm = task1.edge_magnitude(temp_x, temp_y)\r\n\r\n task1.write_image(task1.normalize(template_norm), \".//template_norm.jpg\") \"\"\"\r\n\r\n img_norm = task1.normalize(img)\r\n template_norm = task1.normalize(template)\r\n\r\n coordinates = []\r\n temp_h = len(template_norm)\r\n temp_w = len(template_norm[0])\r\n\r\n rows = len(img_norm)\r\n cols = len(img_norm[0])\r\n\r\n output = [[0 for x in range(len(img_norm[0]))] for y in range(len(img_norm))]\r\n cropped_img = [[0 for x in range(temp_w)] for y in range(temp_h)]\r\n\r\n for i in range(rows):\r\n for j in range(cols):\r\n\r\n if ((i +temp_h) < rows and (j + temp_w < cols)):\r\n cropped_img = utils.crop(img_norm, i, i + temp_h, j, j + temp_w)\r\n\r\n\r\n img_mul_temp = utils.elementwise_mul(cropped_img, template_norm)\r\n sum = 0\r\n # sum of every elemnet in img_mul_temp\r\n for p in range(temp_h):\r\n for q in range(temp_w):\r\n sum += img_mul_temp[p][q]\r\n\r\n # squaring every element in denominator of image\r\n square_img = utils.elementwise_mul(cropped_img, cropped_img)\r\n numsum_img = 0\r\n for d in range(len(cropped_img)):\r\n for e in range(len(cropped_img[0])):\r\n numsum_img += square_img[d][e]\r\n\r\n # squaring every element in denominator of template\r\n square_temp = utils.elementwise_mul(template_norm, template_norm)\r\n numsum_temp = 0\r\n for k in range(temp_h):\r\n for l in range(temp_w):\r\n numsum_temp += square_temp[k][l]\r\n\r\n denominator = np.sqrt((numsum_img * numsum_temp))\r\n\r\n if (denominator != 0):\r\n output[i][j] = (sum / denominator)\r\n if (output[i][j] > threshold):\r\n coordinates.append([i, j])\r\n\r\n # TODO: implement this function.\r\n # raise NotImplementedError\r\n return coordinates", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def act(self, x: np.ndarray, t: int = None, noise: np.ndarray = None) -> np.ndarray:", "def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n 
first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n Utils.close_files()", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva", "def test_without_target(self, X, y):\n try:\n resize_batch(X, y, 1.0, 'crop', resize_targets=False)\n except:\n pytest.fail('apply_progressive_resizing failed with y == None')", "def run(self):\n self.run_tasks()\n self.images = np.array(self.images)\n self.shapes.extend(self.images.shape[-2:])\n\n self.images = np.reshape(self.images, self.shapes)", "def propagateImage(self, dryrun):\n pass", "def process(image):\n pass", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n args.resolution,\n srgb_out=not args.no_srgb,\n )", "def evaluate(t, x, y):\n from PIL import Image\n im = Image.open(filename)\n duration = im.info[\"duration\"]*pq.ms if im.info[\"duration\"] is not 0 else 30*pq.ms\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n\n stim = np.zeros([Nt, Ny, Nx])\n t_map = (t.flatten().rescale(\"ms\") / duration).astype(int)\n t_map = t_map[1:] - t_map[:-1]\n for i, ti in enumerate(t_map):\n try:\n im.seek(im.tell()+ti)\n except EOFError:\n break\n frame = im.convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM).resize((Ny, Nx))\n stim[i, :, :] = np.array(frame)\n stim[i, :, :] = 2 * ((stim[i, :, :] - stim[i, :, :].min()) / (stim[i, :, :].max() - stim[i, :, :].min())) - 1\n\n return stim", "def main():\n original = SimpleImage('images/mt-rainier.jpg')\n original.show()\n reflected = make_reflected('images/mt-rainier.jpg')\n reflected.show()", "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = 
np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def __call__(self, img: torch.Tensor) -> torch.Tensor:\n return self._trafo(img)", "def sample_damaging(image):\r\n return crease_image(blotch_image(image, 100, True), 10, False)", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def runauto(self, istart, nrows, rstep):\n self.ImageSolution=self.arcdisplay.autoidentify(istart=istart, nrows=nrows, rstep=rstep, oneline=False)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def imageprepare(filename):\n img = Image.open(filename).convert('L')\n rect = img.getbbox()\n im = img.crop(rect)\n im.save(filename + '_pressprocessed.png')\n\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (0)) #creates white canvas of 28x28 pixels\n if width > height: #check which dimension is bigger\n #Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\n if (nheight == 0): #rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n else:\n #Height is bigger. Heigth becomes 20 pixels. \n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\n if (nwidth == 0): #rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\n newImage.save(filename + '_final.png')\n tv = list(newImage.getdata()) #get pixel values\n #normalize pixels to 0 and 1. 
0 is pure white, 1 is pure black.\n tva = [ (x)*1.0/255.0 for x in tv] \n return tva", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def resetTransformations():\n dislin.trfres()", "def process_image(self):\n pass", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def main() -> None:\n\n # Define file name\n file_name = define_file()\n\n # Open chosen image\n img = image.load_img(IMAGES + file_name, color_mode='grayscale')\n\n # Show user image\n plt.imshow(img)\n plt.show()\n\n # Convert image to array\n img_arr = image.img_to_array(img)\n img_arr = np.array([img_arr])\n img_arr = img_arr.astype(\"float32\") / 255.0\n\n # Classify image\n img_class = classification(img_arr)\n\n # Suggest user add noise to original image\n if img_class == ORIGINAL:\n while True:\n command = input('Seems like your image is original. Do you want to add noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n noisy_array = noise(img_arr)\n display(img_arr, noisy_array)\n img = image.array_to_img(noisy_array[0])\n img.save(IMAGES + file_name[:-4] + '_noise' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Suggest user remove noise from noised image\n elif img_class == NOISED:\n while True:\n command = input('Seems like your image has noise. Do you want to remove noise? y/n: ')\n if command.strip().lower() in ('y', 'yes'):\n denoise_array = denoise_image(img_arr)\n display(img_arr, denoise_array)\n img = image.array_to_img(denoise_array[0])\n img.save(IMAGES + file_name[:-4] + '_denoised' + file_name[-4:])\n main()\n elif command.strip().lower() in ('n', 'no'):\n main()\n else:\n continue\n\n # Image denoised. Nothing to do\n else:\n print('Seems like your image denoised.')\n main()", "def imageprepare(argv):\r\n im = Image.open(argv).convert('L')\r\n width = float(im.size[0])\r\n height = float(im.size[1])\r\n newImage = Image.new('L', (28, 28), (255)) #creates white canvas of 28x28 pixels\r\n \r\n if width > height: #check which dimension is bigger\r\n #Width is bigger. 
Width becomes 20 pixels.\r\n nheight = int(round((20.0/width*height),0)) #resize height according to ratio width\r\n if (nheight == 0): #rare case but minimum is 1 pixel\r\n nheight = 1 \r\n # resize and sharpen\r\n img = im.resize((20,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition\r\n newImage.paste(img, (4, wtop)) #paste resized image on white canvas\r\n else:\r\n #Height is bigger. Heigth becomes 20 pixels. \r\n nwidth = int(round((20.0/height*width),0)) #resize width according to ratio height\r\n if (nwidth == 0): #rare case but minimum is 1 pixel\r\n nwidth = 1\r\n # resize and sharpen\r\n img = im.resize((nwidth,20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n wleft = int(round(((28 - nwidth)/2),0)) #caculate vertical pozition\r\n newImage.paste(img, (wleft, 4)) #paste resized image on white canvas\r\n \r\n #newImage.save(\"sample.png\")\r\n\r\n tv = list(newImage.getdata()) #get pixel values\r\n \r\n #normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\r\n tva = [ (255-x)*1.0/255.0 for x in tv] \r\n #print(tva)\r\n return tva", "def demo(image, model_class, do_add_noise=True):\n Log.enable_output = True\n Log.set_log_max_depth(8)\n\n image = normalise(image)\n image = numpy.expand_dims(image, axis=0)\n image = numpy.expand_dims(image, axis=0)\n noisy = add_noise(image) if do_add_noise else image\n print(noisy.shape)\n\n # noisy = models.tensor(noisy)\n image = torch.tensor(image)\n\n model = model_class(\n nb_unet_levels=2,\n spacetime_ndim=2,\n )\n\n print(\"training starts\")\n\n start = time.time()\n n2t_train(noisy, model, nb_epochs=128)\n stop = time.time()\n print(f\"Training: elapsed time: {stop - start} \")\n\n noisy = torch.tensor(noisy)\n model.eval()\n model = model.cpu()\n print(f\"noisy tensor shape: {noisy.shape}\")\n # in case of batching we have to do this:\n start = time.time()\n denoised = model(noisy)\n stop = time.time()\n print(f\"inference: elapsed time: {stop - start} \")\n\n noisy = noisy.detach().numpy()[0, 0, :, :]\n image = image.detach().numpy()[0, 0, :, :]\n denoised = denoised.detach().numpy()[0, 0, :, :]\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n denoised = numpy.clip(denoised, 0, 1)\n\n return calculate_print_psnr_ssim(image, noisy, denoised)\n\n # import napari\n #\n # viewer = napari.Viewer() # no prior setup needed\n # viewer.add_image(image, name='image')\n # viewer.add_image(noisy, name='noisy')\n # viewer.add_image(denoised, name='denoised')\n # napari.run()", "def applyARUNet(self):\n assert(self.files and len(self.files) > 0)\n\n # TODO: move this to the init method\n session_conf = tf.ConfigProto()\n session_conf.gpu_options.visible_device_list = self.gpu_device\n pred = None\n with tf.Session(graph=self.graph, config=session_conf) as sess:\n x = self.graph.get_tensor_by_name('inImg:0')\n predictor = self.graph.get_tensor_by_name('output:0')\n \n progress = getProgressBar()\n for i in progress(range(len(self.files))):\n# print(self.files[i])\n # img: numpy array (height x width x channels)\n # scipy's misc.imread is deprecated\n # TODO: switch maybe to opencv instead of pillow with its image\n # class overhead\n pil_img = Image.open(self.files[i]).convert('L') # grayscale\n img = np.array(pil_img)\n size = (int(img.shape[1]*self.scale),int(img.shape[0]*self.scale))\n small = np.array(pil_img.resize(size, resample=Image.BICUBIC))\n origsize = (img.shape[1],img.shape[0]) \n\n # TODO: can we actually put them in 1 or 2 
batches?\n pred1 = self.runSession(sess, x, predictor, small)\n out = self.pruneBaselines(pred1, size=origsize)\n\n # try other orientation\n # TODO: think of a better check! \n # this one depends on the resolution and due to noise might\n # still pass...\n if self.test_orientation and \\\n np.count_nonzero(out) < 100: \n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n print('rotate it now and try again...')\n # rotate 90 degree counter clock-wise\n small2 = rotate(small, 90, True)\n pred2 = self.runSession(sess, x, predictor, small2) \n origsize = (origsize[1],origsize[0])\n out2 = self.pruneBaselines(pred2, size=origsize)\n # check which direction has higher probability\n # Note: unfortunately the probas are similar high for 0 + 180,\n # as well as 90 and 270 degree, so we cannot test for these\n # orientations!\n # Note 2: raw probability map didnt work out for me, so lets do\n # it that way\n n_comp, _, stats1, _ =\\\n cv2.connectedComponentsWithStats(out.astype(np.uint8))\n n_comp2, _, stats2, _ =\\\n cv2.connectedComponentsWithStats(out2.astype(np.uint8))\n # test for area, assumption is that we get larger\n # mean/median/sum area if it's correctly rotated\n # TODO: might still be a bad test due to noise...\n stat1 = np.sum(stats1[1:,cv2.CC_STAT_AREA])\n stat2 = np.sum(stats2[1:,cv2.CC_STAT_AREA])\n if stat2 > stat1: \n print('rotation by 90 degree counter clockwise gives higher'\n ' probability (orig {} vs rot: {}) for file {}\\n'\n ' -> rotate this file (90 degree'\n ' counter clock-wise), too!'.format(stat1, stat2, self.files[i]))\n out = out2\n \n # small check if we have found a line at all\n if np.count_nonzero(out) < 50: # TODO: think of a better check\n print('WARNING: no baseline found for img:'\n ' {}'.format(self.files[i]))\n\n if self.to_line:\n out = baseToLine(out)\n\n # save it\n name = os.path.splitext(os.path.basename(self.files[i]))[0]\n suffix = self.out_suffix if self.out_suffix else ''\n path = os.path.join(self.outdir, '{}{}.png'.format(name,suffix))\n# print('save to: {}'.format(path))\n out = out * 255\n misc.imsave(path, out)\n \n return pred", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, 
n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def run_image(image_path, lattice_size=35):\n im = plt.imread(image_path)[:, :, 2]\n im_pixels = _pixels(im)\n\n print('compression ratio is ', lattice_size**2 / float(im.size))\n\n # Hyperparameters.\n num_keypoints = 2\n hparams = tfl.CalibratedRtlHParams(\n num_keypoints=num_keypoints,\n num_lattices=1,\n lattice_rank=2,\n learning_rate=0.003,\n lattice_size=lattice_size)\n\n # Estimator.\n # input: coordinate of the pixel\n # output: value of the pixel\n feature_columns = [\n tf.feature_column.numeric_column('pixel_x'),\n tf.feature_column.numeric_column('pixel_y'),\n ]\n\n def keypoints_initializers():\n return tfl.uniform_keypoints_for_signal(\n num_keypoints,\n input_min=0.0,\n input_max=im_pixels.max(),\n output_min=0.0,\n output_max=lattice_size - 1\n )\n rtl_estimator = tfl.calibrated_rtl_regressor(\n feature_columns=feature_columns,\n hparams=hparams,\n keypoints_initializers_fn=keypoints_initializers\n )\n\n # Example input function.\n input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=15,\n shuffle=True)\n\n # Train!\n rtl_estimator.train(input_fn=input_fn)\n\n # Evaluate!\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={\n 'pixel_x': im_pixels[:, 0],\n 'pixel_y': im_pixels[:, 1]\n },\n y=im_pixels[:, 2],\n batch_size=5000,\n num_epochs=1,\n shuffle=True)\n print(rtl_estimator.evaluate(input_fn=eval_input_fn))\n\n return rtl_estimator", "def applyMorphologicalCleaning(self, image):", "def __call__(self, images, targets):\n pass", "def test_scrubbing_wf_no_insert_na(\n artifact_dir, sample_raw_image, plot_img, request, helpers\n):\n\n test_path = helpers.create_test_dir(artifact_dir, request.node.name)\n scrubbed_path = test_path / \"scrubbed.nii.gz\"\n\n scrub_vector = [0, 1, 
0, 0, 0, 0, 1, 0, 0, 0]\n\n wf = build_scrubbing_workflow(\n scrub_vector,\n import_path=sample_raw_image,\n insert_na=False,\n export_path=scrubbed_path,\n base_dir=test_path,\n crashdump_dir=test_path,\n )\n\n wf.write_graph(dotfilename=test_path / \"scrubbed_flow\", graph2use=\"colored\")\n\n wf.run()\n\n helpers.plot_timeseries(scrubbed_path, sample_raw_image)\n\n if plot_img:\n helpers.plot_4D_img_slice(scrubbed_path, \"scrubbed.png\")", "def evaluate(t, x, y):\n # TODO: fix normalization\n from PIL import Image\n\n Nt = t.shape[0]\n Nx = x.shape[2]\n Ny = y.shape[1]\n stim = np.zeros([Nt, Nx, Ny])\n\n for i, filename in enumerate(filenames):\n im = Image.open(filename).convert(\"L\").transpose(Image.FLIP_TOP_BOTTOM)\n t_start = delay + i * (delay + duration)\n t_stop = (i+1) * (duration + delay)\n stim += np.array(im.resize((Ny, Nx))) * (heaviside(t - t_start) - heaviside(t - t_stop))\n\n if stim.max() - stim.min() != 0:\n stim = 2 * ((stim - stim.min()) / (stim.max() - stim.min())) - 1\n return stim", "def main():\n\n # first figure: betas for each predictor\n fig, axes = plt.subplots(figsize=(8, 18), nrows=3)\n\n image_paths = {\n \"3T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20161006_childVSall_depth_1.png\"\n ),\n \"7T\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4.png\"\n ),\n \"7T_noise\": (\n f\"{PATHS['figures_misc']}/figure_3_images/\"\n f\"C1051_20160212_childVSall_depth_1_sim2pt4_noise.png\"\n ),\n }\n\n for ax, (_, image_path) in zip(axes, image_paths.items()):\n assert os.path.isfile(image_path)\n img = imread(image_path)\n img[np.where(np.sum(img, axis=2) == 0.0)] = 1.0\n\n ax.imshow(img[200:-100, 50:-50, :])\n ax.axis(\"off\")\n\n savefig(f\"{PATHS['figures']}/figure_3ac.png\")\n plt.close(fig)", "def testDetect(name = \"smokey.gif\", amount = 20):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n image2 = detectEdges(image, amount)\n image2.draw()", "def testTinttsysNNSp(self):\n self._runTest('tinttsys', True, self.tsys_funcs.keys(), 'nearest,nearest')", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n \"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration 
result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def test(one=True, training=True, detector_debug=False):\n total = 0\n imgs = []\n if one:\n\n print(\"Detecting:\")\n file_sign = \"./Data/Reglamentarias/STC-RG-3.jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=True, debug=detector_debug)\n s, th = d.detect()\n seg = Segmenter(s)\n\n seg.keypoints()\n seg.descriptors()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n if training:\n print (\"Training\")\n for imagePath in paths.list_images(\"./training/PV/\"):\n print (imagePath)\n sign = cv2.imread(imagePath, 1)\n\n seg = Segmenter(sign)\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"res\", res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n else:\n if not one:\n for i in range(1, 90):\n # file = \"./Data/Preventivas/STC-PV-\"+str(i)+\".jpg\"\n file_sign = \"./Data/Reglamentarias/STC-RG-\" + str(i) + \".jpg\"\n # file = \"./Data/Mixtas/STC-MX-\"+ str(i) +\".jpg\"\n sign = cv2.imread(file_sign, 1)\n d = Detector(sign, show=False)\n s, th = d.detect()\n if s is not None:\n total += 1\n imgs.append((i, s, th))\n\n print (\"Detected:\", str(total))\n\n for i in range(1, len(imgs)-1):\n seg = Segmenter(imgs[i][1])\n seg.watershed()\n seg.keypoints()\n res = np.concatenate((seg.origi, seg.th, seg.img, seg.kpimg), axis=1)\n cv2.imshow(\"img\"+str(imgs[i][0]), res)\n print (str(imgs[i][0]))\n\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def skeletonize(data,subscriber = 0):\n nx,ny=data.shape\n #zero padding\n image = zeros((nx+2,ny+2),'int16')\n image[:,:] = IP.BACKGROUND_COLOR\n image[1:-1,1:-1]=data\n\n erosionComplete = False\n runs = 0\n erosionComplete = False\n runs = 0\n isCorner = zeros((nx+2,ny+2),'bool')\n while not erosionComplete:\n ruleI = (image == IP.FEATURE_COLOR)\n XFeat, YFeat = ruleI.nonzero()\n numberFeatures = len(XFeat)\n erosedPixels = 0\n if runs == 0:\n progressbar = progress(numberFeatures)\n neighbourhood = zeros((nx+2,ny+2,3),'int16')\n for x,y in zip(XFeat.tolist(),YFeat.tolist()):\n fingerprint = checkNeighbours(image[x-1:x+2,y-1:y+2])\n neighbourhood[x,y,:]=numpy.array(fingerprint)\n\n ruleII = neighbourhood[:,:,1]>=1\n ruleIII = neighbourhood[:,:,0]> 1\n border = (ruleI & ruleII & ruleIII)\n #ruleIV and ruleV\n XBord, YBord = border.nonzero()\n XBord2 = []\n YBord2 = []\n for x,y in zip(XBord.tolist(),YBord.tolist()):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= progressbar.step()\n else:\n XBord2.append(x)\n YBord2.append(y)\n for x,y in zip(XBord2,YBord2):\n if checkTransitions(image[x-1:x+2,y-1:y+2]) <= 1 and not isCorner[x,y]:\n image[x,y] = IP.BACKGROUND_COLOR\n erosedPixels += 1\n subscriber %= 
progressbar.step()\n if erosedPixels == 0:\n erosionComplete = True\n subscriber %= 100.\n else:\n xCorn, yCorn = (neighbourhood[:,:,2] > 0 ).nonzero()\n for x,y in zip(xCorn.tolist(),yCorn.tolist()):\n if neighbourhood[x,y,2] == 1:\n isCorner[x+1,y-1] = True\n elif neighbourhood[x,y,2] == 2:\n isCorner[x+1,y+1] = True\n elif neighbourhood[x,y,2] == 3:\n isCorner[x-1,y+1] = True\n elif neighbourhood[x,y,2] == 4:\n isCorner[x-1,y-1] = True\n runs += 1\n return image[1:-1,1:-1].copy()", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def testPosterize(name = \"smokey.gif\", triple = (0,0,0)):\n image = Image(name)\n print(\"Close the image window to see the transformation\")\n image.draw()\n posterize(image, triple)\n image.draw()", "def transform(self, previousimage):", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def train(args):\n # Create the data loader\n loader = sunnerData.DataLoader(\n dataset = sunnerData.ImageDataset(\n root = [[args.train]],\n transforms = transforms.Compose([\n \n# transforms.RandomCrop(720,720)\n# transforms.RandomRotation(45)\n# transforms.RandomHorizontalFlip(), \n# transforms.ColorJitter(brightness=0.5, contrast=0.5),\n \n\n sunnerTransforms.Resize(output_size = (args.H, args.W)),\n #transforms.RandomCrop(512,512)\n sunnerTransforms.ToTensor(),\n sunnerTransforms.ToFloat(),\n # sunnerTransforms.Transpose(),\n sunnerTransforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n ), batch_size = args.batch_size, shuffle = True, num_workers = 2\n )\n loader = sunnerData.IterationLoader(loader, max_iter = args.n_iter)\n\n # Create the model\n model = GANomaly2D(r = args.r, device = 
args.device)\n model.IO(args.resume, direction = 'load')\n model.train()\n \n # Train!\n bar = tqdm(loader)\n for i, (normal_img,) in enumerate(bar):\n model.forward(normal_img)\n model.backward()\n loss_G, loss_D = model.getLoss()\n bar.set_description(\"Loss_G: \" + str(loss_G) + \" loss_D: \" + str(loss_D))\n bar.refresh()\n if i % args.record_iter == 0:\n model.eval()\n with torch.no_grad():\n z, z_ = model.forward(normal_img)\n img, img_ = model.getImg()\n visualizeEncoderDecoder(img, img_, z, z_,i)\n model.train()\n model.IO(args.det, direction = 'save')\n model.IO(args.det, direction = 'save')", "def tessellate(self):\n\n self.tessellation = Delaunay(self.grid)", "def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1", "def detectSpots(img, detectSpotsParameter = None, correctIlluminationParameter = None, removeBackgroundParameter = None,\n filterDoGParameter = None, findExtendedMaximaParameter = None, detectCellShapeParameter = None,\n verbose = False, out = sys.stdout, **parameter):\n\n timer = Timer();\n \n # normalize data -> to check\n #img = img.astype('float');\n #dmax = 0.075 * 65535;\n #ids = img > dmax;\n #img[ids] = dmax;\n #img /= dmax; \n #out.write(timer.elapsedTime(head = 'Normalization'));\n #img = dataset[600:1000,1600:1800,800:830];\n #img = dataset[600:1000,:,800:830];\n \n # correct illumination\n correctIlluminationParameter = getParameter(detectSpotsParameter, \"correctIlluminationParameter\", correctIlluminationParameter);\n img1 = img.copy();\n img1 = 
correctIllumination(img1, correctIlluminationParameter = correctIlluminationParameter, verbose = verbose, out = out, **parameter) \n\n # background subtraction in each slice\n #img2 = img.copy();\n removeBackgroundParameter = getParameter(detectSpotsParameter, \"removeBackgroundParameter\", removeBackgroundParameter);\n img2 = removeBackground(img1, removeBackgroundParameter = removeBackgroundParameter, verbose = verbose, out = out, **parameter) \n \n # mask\n #timer.reset();\n #if mask == None: #explicit mask\n # mask = img > 0.01;\n # mask = binary_opening(mask, self.structureELement('Disk', (3,3,3)));\n #img[img < 0.01] = 0; # masking in place # extended maxima\n #out.write(timer.elapsedTime(head = 'Mask')); \n \n #DoG filter\n filterDoGParameter = getParameter(detectSpotsParameter, \"filterDoGParameter\", filterDoGParameter);\n dogSize = getParameter(filterDoGParameter, \"size\", None);\n #img3 = img2.copy(); \n img3 = filterDoG(img2, filterDoGParameter = filterDoGParameter, verbose = verbose, out = out, **parameter);\n \n # normalize \n # imax = img.max();\n # if imax == 0:\n # imax = 1;\n # img /= imax;\n \n # extended maxima\n findExtendedMaximaParameter = getParameter(detectSpotsParameter, \"findExtendedMaximaParameter\", findExtendedMaximaParameter);\n hMax = getParameter(findExtendedMaximaParameter, \"hMax\", None);\n imgmax = findExtendedMaxima(img3, findExtendedMaximaParameter = findExtendedMaximaParameter, verbose = verbose, out = out, **parameter);\n \n #center of maxima\n if not hMax is None:\n centers = findCenterOfMaxima(img, imgmax, verbose = verbose, out = out, **parameter);\n else:\n centers = findPixelCoordinates(imgmax, verbose = verbose, out = out, **parameter);\n \n #cell size detection\n detectCellShapeParameter = getParameter(detectSpotsParameter, \"detectCellShapeParameter\", detectCellShapeParameter);\n cellShapeThreshold = getParameter(detectCellShapeParameter, \"threshold\", None);\n if not cellShapeThreshold is None:\n \n # cell shape via watershed\n imgshape = detectCellShape(img2, centers, detectCellShapeParameter = detectCellShapeParameter, verbose = verbose, out = out, **parameter);\n \n #size of cells \n csize = findCellSize(imgshape, maxLabel = centers.shape[0], out = out, **parameter);\n \n #intensity of cells\n cintensity = findCellIntensity(img, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findCellIntensity(img2, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findCellIntensity(img3, imgshape, maxLabel = centers.shape[0], verbose = verbose, out = out, **parameter);\n \n if verbose:\n out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n #remove cell;s of size 0\n idz = csize > 0;\n \n return ( centers[idz], numpy.vstack((cintensity[idz], cintensity3[idz], cintensity2[idz], csize[idz])).transpose()); \n \n \n else:\n #intensity of cells\n cintensity = findIntensity(img, centers, verbose = verbose, out = out, **parameter);\n\n #intensity of cells in background image\n cintensity2 = findIntensity(img2, centers, verbose = verbose, out = out, **parameter);\n \n #intensity of cells in dog filtered image\n if dogSize is None:\n cintensity3 = cintensity2;\n else:\n cintensity3 = findIntensity(img3, centers, verbose = verbose, out = out, **parameter);\n\n if verbose:\n 
out.write(timer.elapsedTime(head = 'Spot Detection') + '\\n');\n \n return ( centers, numpy.vstack((cintensity, cintensity3, cintensity2)).transpose());", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def test_time_optimize(args, model, optim, imgs, poses, hwf, bound):\n pixels = imgs.reshape(-1, 3)\n\n rays_o, rays_d = get_rays_shapenet(hwf, poses)\n rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)\n\n num_rays = rays_d.shape[0]\n for step in range(args.tto_steps):\n indices = torch.randint(num_rays, size=[args.tto_batchsize])\n raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]\n pixelbatch = pixels[indices] \n t_vals, xyz = sample_points(raybatch_o, raybatch_d, bound[0], bound[1],\n args.num_samples, perturb=True)\n \n optim.zero_grad()\n rgbs, sigmas = model(xyz)\n colors = volume_render(rgbs, sigmas, t_vals, white_bkgd=True)\n loss = F.mse_loss(colors, pixelbatch)\n loss.backward()\n optim.step()", "def test_replace_image(self):\n pass", "def test_resize_noop(self, X, y, mode):\n Xc, _ = resize_batch(X, y, 1.0, mode, resize_targets=False)\n assert X is Xc", "def testComputeImage(self):\n for fiberId in self.detMap.fiberId:\n for fraction in (0.1, 0.5, 0.9):\n yy = self.synthConfig.height*fraction\n if yy == int(yy):\n # Ensure we have a non-integer pixel position,\n # so computeImage and computeKernelImage differ\n yy += 0.5\n wavelength = self.detMap.findWavelength(fiberId, yy)\n image = self.psf.computeImage(fiberId, wavelength)\n kernel = self.psf.computeKernelImage(fiberId, wavelength)\n\n # Image should have xy0 set somewhere in the middle of the larger image\n self.assertNotEqual(image.getX0(), 0)\n self.assertNotEqual(image.getY0(), 0)\n\n # Kernel should have xy0 set to the half-size\n halfSize = (self.size - 1)//2\n self.assertEqual(kernel.getX0(), -halfSize)\n self.assertEqual(kernel.getY0(), -halfSize)\n\n # Centroid on image should be at the point of interest\n xx, yy = self.detMap.findPoint(fiberId, wavelength)\n centroid = calculateCentroid(image)\n self.assertFloatsAlmostEqual(xx, centroid.x, atol=2.0e-2)\n self.assertFloatsAlmostEqual(yy, centroid.y, atol=2.0e-2)\n\n # Centroid on kernel should be zero\n centroid = calculateCentroid(kernel)\n self.assertFloatsAlmostEqual(centroid.x, 0.0, atol=1.0e-7)\n self.assertFloatsAlmostEqual(centroid.y, 0.0, atol=1.0e-7)", "def run_predictive(op) -> None:\n\n try:\n img = Image.open(op['input'])\n except Exception as e:\n print(e)\n sys.exit(1)\n\n algo.predictive.run(op)", "def run(self, image):\n # width, height = image.size\n # resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n # target_size = (int(resize_ratio * width), int(resize_ratio * height))\n target_size = (self.INPUT_SIZE, self.INPUT_SIZE)\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n net_image = resized_image\n if params.HZ_preprocess_activate:\n net_image = params.image_preprocess_func(resized_image)\n net_image = np.expand_dims(net_image, axis=-1)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(net_image)]})\n seg_map = batch_seg_map[0]\n return resized_image, seg_map", "def test_transformer2d_single_step_e2e(self):\n\n problem_object = allen_brain.Img2imgAllenBrainDim8to32()\n\n with TemporaryDirectory() as tmp_dir:\n\n mock_raw_data(tmp_dir, raw_dim=256, num_images=100)\n\n with TemporaryDirectory() as data_dir:\n\n problem_object.generate_data(data_dir, tmp_dir)\n\n 
input_xy_dim = problem_object.input_dim\n target_xy_dim = problem_object.output_dim\n num_channels = problem_object.num_channels\n\n hparams = image_transformer_2d.img2img_transformer2d_tiny()\n hparams.data_dir = data_dir\n\n p_hparams = problem_object.get_hparams(hparams)\n\n model = image_transformer_2d.Img2imgTransformer(\n hparams, tf.estimator.ModeKeys.TRAIN, p_hparams\n )\n\n @tfe.implicit_value_and_gradients\n def loss_fn(features):\n _, losses = model(features)\n return losses[\"training\"]\n\n batch_size = 1\n train_dataset = problem_object.dataset(Modes.TRAIN, data_dir)\n train_dataset = train_dataset.repeat(None).batch(batch_size)\n\n optimizer = tf.train.AdamOptimizer()\n\n example = tfe.Iterator(train_dataset).next()\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [batch_size,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n _, gv = loss_fn(example)\n optimizer.apply_gradients(gv)\n\n model.set_mode(Modes.EVAL)\n dataset = problem_object.dataset(Modes.EVAL, data_dir)\n\n example = tfe.Iterator(dataset).next()\n example[\"inputs\"] = tf.reshape(example[\"inputs\"],\n [1,\n input_xy_dim,\n input_xy_dim,\n num_channels])\n example[\"targets\"] = tf.reshape(example[\"targets\"],\n [1,\n target_xy_dim,\n target_xy_dim,\n num_channels])\n\n predictions, _ = model(example)\n\n self.assertEqual(predictions.numpy().shape,\n (1,\n target_xy_dim,\n target_xy_dim,\n num_channels,\n 256))", "def preprocessing(image, smooth_size, folder):\n from skimage.restoration import denoise_tv_chambolle\n \n dim = int(image.shape[0] / 50.)\n smoothed = rank.median(image, disk(smooth_size))\n #smoothed = denoise_tv_chambolle(image, weight=0.002)\n smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))\n \n pl.subplot(2, 3, 1)\n pl.title(\"after median\")\n pl.imshow(smoothed)\n pl.gray()\n # If after smoothing the \"dot\" disappears\n # use the image value\n \n # TODO: wat do with thresh?\n try:\n im_max = smoothed.max()\n thresh = threshold_otsu(image)\n except:\n im_max = image.max()\n thresh = threshold_otsu(image)\n\n \n if im_max < thresh:\n labeled = np.zeros(smoothed.shape, dtype=np.int32)\n \n else:\n binary = smoothed > thresh\n \n # TODO: this array size is the fault of errors\n bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)\n bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)\n \n pl.subplot(2, 3, 2)\n pl.title(\"threshold\")\n pl.imshow(binary, interpolation='nearest')\n pl.subplot(2, 3, 3)\n pl.title(\"opening\")\n pl.imshow(bin_open, interpolation='nearest')\n pl.subplot(2, 3, 4)\n pl.title(\"closing\")\n pl.imshow(bin_close, interpolation='nearest')\n \n distance = ndimage.distance_transform_edt(bin_open)\n local_maxi = peak_local_max(distance,\n indices=False, labels=bin_open)\n \n markers = ndimage.label(local_maxi)[0]\n \n labeled = watershed(-distance, markers, mask=bin_open)\n pl.subplot(2, 3, 5)\n pl.title(\"label\")\n pl.imshow(labeled)\n #pl.show()\n pl.savefig(folder)\n pl.close('all')\n\n #misc.imsave(folder, labeled)\n# labels_rw = random_walker(bin_close, markers, mode='cg_mg')\n# \n# pl.imshow(labels_rw, interpolation='nearest')\n# pl.show()\n\n return labeled", "def test_full_setup(n):\n for x in range(n):\n for y in range(n):\n Stitch(x,y)\n Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def setup():\n img = Image.new('RGB', (10, 20))\n img.putpixel((5, 10), (0, 255, 0))\n img.save('green-dot.tif')\n img.save('green-dot.jpg')\n img.save('green-dot.png')", "def imageprepare(argv):\n 
im = Image.open(argv).convert('L')\n width = float(im.size[0])\n height = float(im.size[1])\n newImage = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels\n\n if width > height: # check which dimension is bigger\n # Width is bigger. Width becomes 20 pixels.\n nheight = int(round((20.0 / width * height), 0)) # resize height according to ratio width\n if (nheight == 0): # rare case but minimum is 1 pixel\n nheight = 1\n # resize and sharpen\n img = im.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position\n newImage.paste(img, (4, wtop)) # paste resized image on white canvas\n else:\n # Height is bigger. Heigth becomes 20 pixels.\n nwidth = int(round((20.0 / height * width), 0)) # resize width according to ratio height\n if (nwidth == 0): # rare case but minimum is 1 pixel\n nwidth = 1\n # resize and sharpen\n img = im.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition\n newImage.paste(img, (wleft, 4)) # paste resized image on white canvas\n\n # newImage.save(\"sample.png\n\n tv = list(newImage.getdata()) # get pixel values\n\n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n print(tva)\n return tva", "def test_synthetic_auto():\n background = Image.new('RGB', (7, 3), (125, 125, 125))\n red = Image.new('RGB', (1, 1), (255, 0, 0))\n green = Image.new('RGB', (1, 1), (0, 255, 0))\n blue = Image.new('RGB', (1, 1), (0, 0, 255))\n\n parameters = {\n 'data': [background, red, green, blue],\n 'positions': 'auto'\n }\n\n synth = images.synthetic(parameters)\n\n assert_equal(synth.size, (7, 3))\n assert_equal(synth.getpixel((1, 1)), (255, 0, 0, 255))\n assert_equal(synth.getpixel((3, 1)), (0, 255, 0, 255))\n assert_equal(synth.getpixel((5, 1)), (0, 0, 255, 255))", "def run(self, image):\n start = datetime.datetime.now()\n\n width, height = image.size\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n resized_image = image.convert('RGB').resize(target_size, Image.ANTIALIAS)\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n end = datetime.datetime.now()\n\n diff = end - start\n print(\"Time taken to evaluate segmentation is : \" + str(diff))\n\n return resized_image, seg_map", "def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")" ]
[ "0.6175", "0.5833953", "0.5790842", "0.5783519", "0.56402856", "0.5631712", "0.56218964", "0.56209666", "0.56089985", "0.5602803", "0.5586337", "0.55410194", "0.54952884", "0.54952645", "0.5484749", "0.545018", "0.54336417", "0.54323924", "0.5420694", "0.5396064", "0.53868186", "0.53826827", "0.5362292", "0.53536016", "0.5348376", "0.5334613", "0.5331135", "0.5322052", "0.53130174", "0.52966183", "0.52957296", "0.52858585", "0.52761126", "0.52730453", "0.52730453", "0.52709746", "0.52577615", "0.5255402", "0.5254579", "0.5252006", "0.52486026", "0.52388537", "0.5236693", "0.52361727", "0.52310973", "0.5229211", "0.5226386", "0.52245146", "0.5223437", "0.52189183", "0.52161115", "0.52122986", "0.5206287", "0.5203338", "0.5197991", "0.51952636", "0.518684", "0.51737463", "0.51647913", "0.5163664", "0.5163374", "0.51623523", "0.5162209", "0.51612526", "0.51582986", "0.51550347", "0.5154782", "0.51534045", "0.5149202", "0.5147345", "0.5146975", "0.5145934", "0.5144491", "0.51382864", "0.51379997", "0.51306033", "0.51246226", "0.5121956", "0.5112787", "0.5103682", "0.5103279", "0.5099865", "0.5096792", "0.5092306", "0.50870067", "0.50856745", "0.5083787", "0.5081803", "0.5081272", "0.5081258", "0.50808245", "0.5080545", "0.5077416", "0.5073753", "0.5072539", "0.5069696", "0.5058945", "0.5058356", "0.5054575", "0.5049315", "0.5047406" ]
0.0
-1
Temporarily overwrite the settings with test settings. This allows test datasets to be used for testing.
def generate_test_settings(tmpdir, dataset):
    # When `tmpdir` is a path convert it to a string
    if isinstance(tmpdir, py._path.local.LocalPath):
        tmpdir = str(tmpdir)

    test_settings = {
        'datasets': {
            'mnist': {
                'train': {
                    'images': "file://" + tmpdir + "/" + dataset + "/server/train-images-idx3-ubyte.gz",
                    'labels': "file://" + tmpdir + "/" + dataset + "/server/train-labels-idx1-ubyte.gz"
                },
                'test': {
                    'images': "file://" + tmpdir + "/" + dataset + "/server/t10k-images-idx3-ubyte.gz",
                    'labels': "file://" + tmpdir + "/" + dataset + "/server/t10k-labels-idx1-ubyte.gz"
                },
            },
        },
        'data-dir': tmpdir + "/" + dataset + "/data"
    }

    overwrite_settings(test_settings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def force_test_setting(dm, tsm, output_path):\n if dm is not None:\n data_json_path = os.path.join(output_path, 'cur_data_setting.json')\n dm.data_par['datapro']['dataset']['prepare_data'] = False\n dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]\n dm.save(data_json_path)\n else:\n tsm.task_par['dataset']['max_num_for_loading'] = [1, 1, -1, 1]\n tsm.task_par['tsk_set']['train'] = False\n tsm.task_par['tsk_set']['continue_train'] = False\n tsk_json_path = os.path.join(output_path, 'cur_task_setting.json')\n tsm.save(tsk_json_path)", "def teardown_function():\n\n # Force module reload as the default test settings have been restored\n importlib.reload(defaults)", "def __perapre_test_setting(package_settings: dict) -> dict:\n\n __package_setting = copy.deepcopy(package_settings)\n\n __package_setting['slient'] = False\n\n if __package_setting.get('weights') is not None:\n __package_setting['weights'] = [1, 1, 1, 1, 1]\n\n return __package_setting", "def test_settings_restored(self) -> None:\n from django.conf import settings\n\n assert TestLiveServer._test_settings_before_run is True # type: ignore[attr-defined]\n assert (\n f\"{settings.__class__.__module__}.{settings.__class__.__name__}\"\n == \"django.conf.Settings\"\n )\n assert settings.ALLOWED_HOSTS == [\"testserver\"]", "def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings", "def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)", "def setUp(self):\n self.dataset = get_test_dataset()", "def test_default_options(self):\r\n\r\n settings.ASSETS_URL_EXPIRE = True\r\n assert get_env().config['url_expire'] == settings.ASSETS_URL_EXPIRE\r\n\r\n settings.ASSETS_ROOT = 'FOO_ASSETS'\r\n settings.STATIC_ROOT = 'FOO_STATIC'\r\n settings.MEDIA_ROOT = 'FOO_MEDIA'\r\n # Pointing to ASSETS_ROOT\r\n assert get_env().directory.endswith('FOO_ASSETS')\r\n get_env().directory = 'BAR'\r\n assert settings.ASSETS_ROOT == 'BAR'\r\n # Pointing to STATIC_ROOT\r\n delsetting('ASSETS_ROOT')\r\n assert get_env().directory.endswith('FOO_STATIC')\r\n get_env().directory = 'BAR'\r\n assert settings.STATIC_ROOT == 'BAR'\r\n # Pointing to MEDIA_ROOT; Note we only\r\n # set STATIC_ROOT to None rather than deleting\r\n # it, a scenario that may occur in the wild.\r\n settings.STATIC_ROOT = None\r\n assert get_env().directory.endswith('FOO_MEDIA')\r\n get_env().directory = 'BAR'\r\n assert settings.MEDIA_ROOT == 'BAR'", "def configure_test(self, test, config_json):\n pass", "def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def custom_settings(tmpdir_factory):\n overrides = override_settings(\n MEDIA_ROOT=str(tmpdir_factory.mktemp('test_media')))\n overrides.enable()", "def setupTests(self, paths = [], tests = {}):\n # Used for settings only\n self.view = self.build.window.active_view()\n self._settings = {}\n for key in buildSettings:\n self._settings[key] = self._coalesceOption(key)\n self.runnerSetup(paths = paths, tests = tests)", "def setUpTestData(cls):\n # Set up non-modified objects used by all test 
methods\n Prohibited.objects.create(credential_type=2, credential='google.com')\n Prohibited.objects.create(credential_type=1, credential='127.0.0.1')", "def _load_test_data(self):\n self._save_test_data()", "def test_set_testing(self):\n old_value = Config.testing\n Config.set_testing(True)\n\n self.assertNotEqual(old_value, Config.testing)", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def setUpConfig(self):\n pass", "def setUp(self):\n self.dataset = self.dataset_cls()", "def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)", "def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None", "def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))", "def setUpClass(cls):\n super(Module05Tests, cls).setUpClass()\n cls.datasets = {\n 0: DATASETS_ROOT + 'diffusion_synthetic_normal_L8_r2_slices_41_50_gr15_b1200',\n 1: DATASETS_ROOT + 'filtered',\n 2: DATASETS_ROOT + 'noise'\n }\n cls.data = smns.load_object(file_path=cls.datasets[2])", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def app(request):\n settings_override = {\n 'TESTING': True,\n }\n yield settings_override", "def initialize_options(self):\n self.all = False\n self.coverage = False\n super(test, self).initialize_options()", "def test_defaults(self) -> None:\n\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n data_set: Tuple[Tuple[TabSetting, bool, str], ...] 
= (\n (\n ShowCaptionsTabSetting,\n DEFAULT_SETINGS[\"show_captions\"],\n \"show_captions\"\n ),\n (\n IncludePathTabSetting,\n DEFAULT_SETINGS[\"include_path\"],\n \"include_path\"\n ),\n (\n ShowGroupCaptionTabSetting,\n DEFAULT_SETINGS[\"show_group_caption\"],\n \"show_group_caption\"\n )\n )\n\n for (cls, enabled, caption) in data_set:\n with self.subTest(cls=cls, enabled=enabled, caption=caption):\n inst = cls(\n self.settings,\n sublime.active_window()\n ) # type: ignore\n self.assertEqual(enabled, inst.is_enabled())\n self.assertListEqual(tabs, inst.apply(tabs))", "def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500", "def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()", "def turn_test_mode_off_by_default(test_mode_off):", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_act_on_settings(self):\n pass # TODO(tlarsen)", "def test_sites(self, test_sites):\n\n self._test_sites = test_sites", "def switch_to_test_data(self) -> None:\n if self._test_name not in self._datasets:\n raise ValueError(\"Test data not provided.\")\n self.switch_to_dataset(self._test_name)", "def setUp(self):\n self.tmp = TemporaryDirectory()", "def setUp(self):\n super(MaintenanceModeMiddlewareTestCase, self).setUp()\n self._set_model_to(False)", "def set_test_environment():\n import flask_monitoringdashboard\n\n flask_monitoringdashboard.config.database_name = 'sqlite:///test-database.db'", "def test_settings(self):\n\n wf._items = []\n\n sys.argv = ['drive.py', '>']\n main(None)\n self.assertEqual(len(wf._items), 4)\n self.assertEqual(wf._items[0].title, SETTINGS['LOGIN']['title'])\n self.assertEqual(wf._items[1].title, SETTINGS['LOGOUT']['title'])\n self.assertEqual(wf._items[2].title, SETTINGS['CLEAR_CACHE']['title'])\n self.assertEqual(wf._items[3].title, SETTINGS['SET_CACHE']['title'] % '[seconds]')\n wf._items = []", "def test_change_default_throttling_settings_http_with_overwrite_not_throttled():", "def set_tests(self, tests):\n self.tests = tests[:]", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n\n self.client = None\n if conf.options.get_value('runlive') == 'true':\n self.client = gdata.analytics.client.AnalyticsClient()\n self.client.http_client.debug = True\n\n conf.configure_client(\n self.client,\n 'AnalyticsClientTest',\n self.client.auth_service)", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n 
self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUpClass(cls):\n super(ExistingDataTest, cls).setUpClass()\n django.setup()", "def setUp(self):\n self.Reinitialize()", "def run(\n dataset,\n setting\n ):\n \n log_setting = setting if setting else \"default\" \n logger.debug(\"Create setting '{0}' from dataset '{1}'\".format(log_setting, dataset))\n\n if dataset in expmgmt.config.settings.get_datasets():\n expmgmt.config.settings.set_dataset(\n dataset,\n setting\n )", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data", "def setUp(self):\n self.test_data = MockPyMySqlDataSource().load()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setUp(self):\n self.db_fd, mainPyUnit.app.config['DATABASE'] = tempfile.mkstemp()\n mainPyUnit.app.config['TESTING'] = True\n self.app = mainPyUnit.app.test_client()\n #mainPyUnit.init_db()", "def setUp(self):\n self.factory = RequestFactory()\n with patch(\"bookwyrm.suggested_users.rerank_suggestions_task.delay\"), patch(\n \"bookwyrm.activitystreams.populate_stream_task.delay\"\n ):\n self.local_user = models.User.objects.create_user(\n \"[email protected]\",\n \"[email protected]\",\n \"password\",\n local=True,\n localname=\"mouse\",\n )\n models.SiteSettings.objects.create()", "def setUp(self) -> None:\n\n self.helper = EnvironmentVariableHelper()\n\n self.test_name = \"PYFUNCEBLE_TESTING\"\n self.temp_env_file = tempfile.NamedTemporaryFile(\"w\", delete=False)", "def get_settings(dataset: DS):\n if dataset == DS.ARTIFICIAL_BBOX:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n _, annotations = create_color_classification(path=project_path, n_samples=50,\n size=(500, 500))\n\n anno = {str(project_path / image_dir / k): [f'{v}.jpg'] for k, v in annotations.items()}\n\n with open(project_file, 'w') as f:\n json.dump(anno, f)\n\n return Settings(project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n label_dir='class_images',\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, 
im_height=50,\n label_width=30, label_height=30,\n n_cols=3)\n elif dataset == DS.ARTIFICIAL_VIDEO:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n create_mot_ds(project_path, image_dir, 20, True)\n return Settings(\n project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n im_width=200,\n im_height=200,\n result_dir='create_results',\n )\n elif dataset == DS.CIFAR10:\n cifar_train_p, cifar_test_p = get_cifar10(Path('data'))\n\n return Settings(project_path=Path('data/cifar10/'),\n project_file=cifar_test_p,\n image_dir='test',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=140, label_height=30,\n n_cols=2)\n\n elif dataset == DS.OXFORD102:\n flowers102_train_p, flowers102_test_p = get_oxford_102_flowers(Path('data'))\n\n return Settings(project_path=Path('data/oxford-102-flowers'),\n project_file=flowers102_test_p,\n image_dir='jpg',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=40, label_height=30,\n n_cols=7)\n\n elif dataset == DS.CUB200:\n cub200_train_p, cub200_test_p = get_cub_200_2011(Path('data'))\n\n return Settings(project_path=Path('data/CUB_200_2011'),\n project_file=cub200_test_p,\n image_dir='images',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=50, label_height=50,\n n_cols=7)\n else:\n raise UserWarning(f\"Dataset {dataset} is not supported!\")", "def test_missing_settings(monkeypatch) -> None: # noqa: TYP001\n monkeypatch.delattr(django_settings, 'SWAGGER_TESTER')\n SwaggerTesterSettings()", "def setUp(self):\n self.tmpdir = mkdtemp()", "def test_data(self):\n if self._test_data is None:\n self._load_test_data()\n if self._swapped_test_data is None:\n self._swapped_test_data = {}\n for key, value in self._test_data.items():\n self._swapped_test_data[key] = value\n return self._swapped_test_data", "def setUp(self):\n rmg_path = os.path.normpath(os.path.join(get_path(), '..'))\n\n self.settings1 = QMSettings(software='mopac',\n method='pm3',\n fileStore=os.path.join(rmg_path, 'testing', 'qm', 'QMfiles'),\n scratchDirectory=None,\n onlyCyclics=False,\n maxRadicalNumber=0,\n )\n\n self.settings2 = QMSettings()", "def setUpTestData(cls):\n # volunteer user\n common.initialize_empty_volunteer()", "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' 
+ dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def setUp(self):\n MainTests.setUp(self)", "def test_get_mt_settings(self):\n pass", "def tearDown(self):\n test_utils.delete_test_config()", "def test_empty_settings(monkeypatch) -> None: # noqa: TYP001\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {})\n SwaggerTesterSettings()", "def settings():\n return SettingsMock.instance()", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_Pandas:\", self._testMethodName)\n\t\tself.model = models.resnet18()\n\t\tself.watcher = ww.WeightWatcher(model=self.model, log_level=logging.WARNING)", "def set_test(self):\n self.genes_test = self.__genes.copy()\n self.__fitness_test = self.__fitness", "def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = json.loads(f.read())\n self.spark = SparkBuilder(\"test\").build_sc()\n self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')", "def setUp(self):\n self.test_max_size = 10", "def reset_settings():\n settings = Settings()\n settings.reset()\n settings.save()", "def setUp(self):\n test_env_setup()", "def test_change_default_throttling_settings_http_with_overwrite_throttled():", "def setUp_extra(self):\n #todo: is this ugly? At least there is explicit assignment of vars.\n # How to do this better? 
\n [self.testproject,\n self.root,\n self.projectadmin,\n self.participant,\n self.registered_user] = self._create_dummy_project(\"view-test\")", "def setUp(self):\n super().setUp()\n current_app.config[\"DOMAIN_ANALYZER_WATCHED_DOMAINS\"] = [\"foobar.com\"]\n current_app.config[\"DOMAIN_ANALYZER_WATCHED_DOMAINS_THRESHOLD\"] = 10\n current_app.config[\"DOMAIN_ANALYZER_WATCHED_DOMAINS_SCORE_THRESHOLD\"] = 0.75\n current_app.config[\"DOMAIN_ANALYZER_WHITELISTED_DOMAINS\"] = [\n \"ytimg.com\",\n \"gstatic.com\",\n \"yimg.com\",\n \"akamaized.net\",\n \"akamaihd.net\",\n \"s-microsoft.com\",\n ]", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def setup_settings():\n settings = DEFAULT_SETTINGS\n if os.environ.get(\"MUTALYZER_SETTINGS\"):\n configuration_path = os.environ[\"MUTALYZER_SETTINGS\"]\n with open(configuration_path) as f:\n configuration_content = \"[config]\\n\" + f.read()\n loaded_settings = configparser.ConfigParser()\n loaded_settings.optionxform = str\n loaded_settings.read_string(configuration_content)\n loaded_settings = {\n sect: dict(loaded_settings.items(sect))\n for sect in loaded_settings.sections()\n }[\"config\"]\n for k in loaded_settings:\n if loaded_settings[k] in {\"yes\", \"true\", \"1\"}:\n loaded_settings[k] = True\n elif loaded_settings[k] in {\"no\", \"false\", \"0\"}:\n loaded_settings[k] = False\n elif loaded_settings[k].isnumeric():\n loaded_settings[k] = int(loaded_settings[k])\n settings.update(loaded_settings)\n\n return settings", "def tearDownConfig(self):\n print time.ctime(), 'enter tearDownConfig'\n\n self.site1 = self.globalCfg['site1']\n self.site2 = self.globalCfg['site2']\n self.site3 = self.globalCfg['site3']\n\n self.site1.databaseLandscapeInfo()\n self.site2.databaseLandscapeInfo()\n self.site3.databaseLandscapeInfo()\n self.site1.systemReplicationStatus()\n\n if self.globalCfg['sync_mode'] == 'sync' and self.site1.fullSync:\n try:\n self.site1.srDisableFullSync(self.site1.getHost(\"WORKER1\"))\n self.site1.fullSync = False\n except Exception, e:\n print 'disable full_sync in tearDownConfig failed: %s' % e\n\n for h in range(1, self.site1.getHostNo()):\n self.site1.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site2.getHostNo()):\n self.site2.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n for h in range(1, self.site3.getHostNo()):\n self.site3.setConfigParameter(h, \"daemon.ini\", \"ConfigMgrPy.HOST\", \"indexserver.c\", \"instanceids\", None)\n\n self.site1.resetStatXSToMaster(self.globalCfg['multiDB'])\n self.site2.resetStatXSToMaster(self.globalCfg['multiDB'])\n 
self.site3.resetStatXSToMaster(self.globalCfg['multiDB'])\n\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site1.setTraceLevel(self.site1.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site2.setTraceLevel(self.site2.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_nameserver\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_dataaccess\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_log_retention\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"pitrestart\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"warm_upper\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"sr_spcoordinator\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"global.ini\", \"persistencelayer\", None)\n self.site3.setTraceLevel(self.site3.getHost(\"WORKER1\"), \"nameserver.ini\", \"nameserver\", None)\n\n # for normal tear down(unregister/disable), the steps should be in order\n # the primary cannot be disabled if there's secondary attached\n # so there's no need to use multi-thread\n # executing here means the landscape has been resorded to site1--(sync/syncmem)--site2--(async)--site3\n #pdb.set_trace()\n self.site3.tearDown()\n self.site2.tearDown()\n self.site1.tearDown()", "def ignore_test_load_and_persist_without_train(self):\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n # interpreter = trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n 
assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)", "def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()", "def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def setUp(self):\n self.design = None", "def setUp(self):\n super(LegacyResultsProcessorUnittest, self).setUp()\n if six.PY2:\n self.data_directory = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'testdata')\n else:\n self.data_directory = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'testdata', 'python3')", "def setUpClass(cls):\n super(SettingsTests, cls).setUpClass()\n # TODO: refactor into a pytest fixture\n\n with create_app().app_context():\n # get the submission test user\n sess = GlobalDB.db().session\n cls.session = sess\n\n cgac = CGAC(cgac_code='097')\n rule = RuleSql(rule_sql_id=1, rule_sql='', rule_label='FABS1', rule_error_message='', query_name='',\n file_id=1, rule_severity_id=2, rule_cross_file_flag=False)\n sess.add_all([cgac, rule])\n sess.commit()\n default_setting = RuleSetting(agency_code='097', rule_label=rule.rule_label, file_id=rule.file_id,\n target_file_id=rule.target_file_id, priority=1, impact_id=1)\n sess.add(default_setting)\n sess.commit()", "def setUp(self):\n\n BaseTest.setUp(self)", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def test_defaults():\n model = torch.nn.Module()\n dataset = torch.utils.data.Dataset()\n dataloader = torch.utils.data.DataLoader(dataset)\n loaders = OrderedDict()\n loaders[\"train\"] = dataloader\n\n test_callbacks = OrderedDict(\n [\n (\"_timer\", TimerCallback),\n (\"_metrics\", MetricManagerCallback),\n (\"_validation\", ValidationManagerCallback),\n (\"_saver\", CheckpointCallback),\n (\"_console\", ConsoleLogger),\n (\"_tensorboard\", TensorboardLogger),\n (\"_exception\", ExceptionCallback),\n ]\n )\n\n exp = SupervisedExperiment(model=model, loaders=loaders)\n _test_callbacks(test_callbacks, exp)", "def setUp(self) -> None:\n self.s3 = boto3.client('s3')\n\n try:\n self.prod_env = os.environ['TEST_ENV'] == \"prod\"\n except KeyError:\n self.prod_env = True", "def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')", "def get_test_config() -> Config:\n # overwrite some settings for unit tests\n args = dict(\n datapath=os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata')),\n debug=True\n )\n return Config(**args)", "def test(self):\n self.training = False", "def test_settingmodel_init():\n 
SettingsModel()", "def setUp(self):\n # create temporary directory\n if not usedir:\n self.test_dir = tempfile.mkdtemp()\n os.chdir(self.test_dir)\n else:\n os.chdir(usedir) \n\n super(SimpleTest, self).setUp()\n\n import SFramework\n self.manager = SFramework.TSStatisticsManager()\n self.manager.getWorkspaces().addObject(self.makeWS())" ]
[ "0.69389164", "0.6621101", "0.6452525", "0.64237857", "0.64121604", "0.6407746", "0.6387266", "0.6371625", "0.6295202", "0.6287137", "0.6241228", "0.6236511", "0.62239265", "0.6192211", "0.6178279", "0.61352205", "0.6134868", "0.6040336", "0.6021929", "0.6021816", "0.6011449", "0.6010427", "0.6010105", "0.6002002", "0.5988062", "0.5981944", "0.59710366", "0.5961021", "0.59479684", "0.5930659", "0.59288794", "0.5921435", "0.5921435", "0.5910502", "0.5907605", "0.5896178", "0.58906156", "0.58890253", "0.58571005", "0.5846073", "0.58348405", "0.58306587", "0.5819421", "0.5819421", "0.5819421", "0.5819421", "0.5808571", "0.58039725", "0.58039725", "0.5792783", "0.57899463", "0.5789425", "0.57835346", "0.5778677", "0.5770828", "0.57641137", "0.57636285", "0.5761736", "0.57598424", "0.57528013", "0.5750042", "0.57472646", "0.5746263", "0.5736428", "0.5728747", "0.57287455", "0.5727402", "0.57258254", "0.57221395", "0.5711878", "0.5710515", "0.57069355", "0.56952626", "0.5694967", "0.5691731", "0.5686592", "0.5684169", "0.56760126", "0.56712365", "0.5662126", "0.5660757", "0.56568456", "0.56556165", "0.56536525", "0.56421316", "0.5638639", "0.5633671", "0.56267774", "0.562627", "0.5624733", "0.5621062", "0.5619578", "0.5611675", "0.56111836", "0.56103593", "0.56072116", "0.5605381", "0.5597976", "0.5596493", "0.5596432" ]
0.6982843
0
Generate archive files for the given test dataset in tmpdir
def generate_test_dataset_archive(filepath, dataset):
    # 'file:///some/path' to '/some/path'
    if filepath[:7] == 'file://':
        filepath = filepath[7:]

    # Check if the dataset exists.
    # If it does not, generate it.
    if not os.path.isfile(filepath):
        print("Generating", filepath)
        data = get_test_dataset(dataset)
        ensure_dir(os.path.dirname(filepath))
        idxgz.save(filepath, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' + dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def test_generate_test_environment(dataset):\n\n print(\"## =========================================================\")\n print(\"## Dataset:\", dataset)\n print(\"## ---------------------------------------------------------\")\n print(\"\")\n\n tmpdir = \"/tmp/collagen\"\n\n generate_test_environment(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n \n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Unpack\n print(\"\")\n print(\"{}: {}\".format(mnist_dataset, filepath))\n print(\"\")\n data = idxgz.load(filepath)\n print(\"data:\", data)\n print(\"type:\", type(data))\n print(\"dtype:\", data.dtype)\n print(\"shape:\", data.shape)\n\n print(\"\")", "def test_dir(tmpdir):\n directory = tmpdir.mkdir('test_dir')\n for i in range(5):\n file_path = directory / 'test_{}.txt'.format(i)\n file_path.write_binary(b\"This is some test data!\")\n return directory", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def test_archive_run(self):\n pass", "def archive_test_logs(days, archive_path, all_logs):\n for day in days.keys():\n daydir = datetime.strptime(day, \"%Y%m%d\").strftime(\"%m-%d-%Y\")\n for scenario in days[day].keys():\n # temporary log directories are stored by scenario + date\n datename = scenario + \"-\" + datetime.strptime(day, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n if datename not in all_logs:\n raise RuntimeError(f\"Missing all_log entry for {datename}\")\n\n if not os.path.exists(all_logs[datename].name):\n raise RuntimeError(f\"Missing log directory for {datename}\")\n\n tmpdir = all_logs[datename].name\n failed = days[day][scenario][\"failed-tests\"]\n flakes = days[day][scenario][\"flaky-tests\"]\n\n scenario_archive = os.path.join(archive_path, daydir, scenario)\n os.makedirs(os.path.join(scenario_archive, \"failed\"))\n os.makedirs(os.path.join(scenario_archive, \"flakes\"))\n # data is organized by test names as keys with lists of tests\n for name in failed:\n i = 1\n for t in sorted(failed[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"failed\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1\n\n for name in flakes:\n i = 1\n for t in sorted(flakes[name], key=lambda x: 
x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not logdir or not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"flakes\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1", "def _archive(self, name, contents, isolate_content):\n # Shared code for all test_isolated_* test cases.\n root = os.path.join(self.tmpdir, name)\n # Refuse reusing the same task name twice, it makes the whole test suite\n # more manageable.\n self.assertFalse(os.path.isdir(root), root)\n os.mkdir(root)\n isolate_path = os.path.join(root, 'i.isolate')\n with open(isolate_path, 'wb') as f:\n f.write(isolate_content)\n for relpath, content in contents.items():\n p = os.path.join(root, relpath)\n d = os.path.dirname(p)\n if not os.path.isdir(d):\n os.makedirs(d)\n with open(p, 'wb') as f:\n f.write(content)\n return self.client.isolate(isolate_path)", "def zip_data_file(task_id, task_name, data_path):\n zip_file_dir = os.path.join(FILE_PATH, task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n sample_path = os.path.join(data_path, \"datasets\", str(task_id) + \"_\" + task_name + \".csv\")\n true_dag_path = os.path.join(data_path, \"true\", str(task_id) + \"_\" + task_name + \".npz\")\n file.write(sample_path)\n file.write(true_dag_path)\n file.close()\n return zip_file_dir", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def 
MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def fixture_out_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"out\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def populated_archivist_dataset(archivist_dataset, tmp_path_factory):\n wpath = tmp_path_factory.mktemp(\"archivistds\")\n\n ads = archivist_dataset\n\n dscontent = (\n ('azip/file1.txt', 'zipfile1'),\n ('azip/file2.csv', 'zipfile2_muchcontent'),\n ('atar/file1.txt', 'tarfile1'),\n ('atar/file2.csv', 'tarfile2_muchcontent'),\n )\n srcds = Dataset(wpath / 'srcds').create(**nonoise)\n for fpath, fcontent in dscontent:\n fpath = srcds.pathobj / (PurePosixPath(fpath))\n fpath.parent.mkdir(parents=True, exist_ok=True)\n fpath.write_text(fcontent)\n srcds.save(**nonoise)\n\n archive_root = wpath / 'myarchive'\n #archivetype = 'zip'\n\n akeys = {}\n\n # no ZIP just yet\n # for archivetype, ext in (('zip', ''), ('tar', '.gz')):\n for archivetype, ext in (('tar', '.gz'), ):\n archive_path = Path(f\"{archive_root}.{archivetype}{ext}\")\n\n archive_path_inds = ads.pathobj / '.archives' / archive_path.name\n # create an archive, the easy way, by simply exporting the\n # entire dataset worktree\n srcds.export_archive(archive_root, archivetype=archivetype,\n **nonoise)\n assert archive_path.exists()\n\n # add the archive (in a hidden dir) to be able to reference\n # it via a key\n aurl = archive_path.as_uri()\n ads.repo.call_annex([\n 'addurl', '--file', str(archive_path_inds), aurl])\n ads.save(**nonoise)\n # get the key of the archive\n akeys[archivetype] = ads.status(\n archive_path_inds, annex='basic', return_type='item-or-list',\n **nonoise)['key']\n return ads, akeys, archive_root, dscontent", "def sample_input_dir():\n 
tmpdir = tempfile.mkdtemp()\n input_zip = os.path.join(ASSETS_DIR, 'input_dir.zip')\n with zipfile.ZipFile(input_zip, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir)\n yield tmpdir\n shutil.rmtree(tmpdir)", "def archive_experiment(experiment_dir: str,\n dst_dir: str,\n save_extensions: Union[str, Sequence[str]]='py',\n exclude_dirs: Union[str, Sequence[str]]='output',\n archive_format: str='zip',\n base_name: Optional[str]=None):\n # Format save_extensions for consistency\n # Make into a sequence\n if isinstance(save_extensions, str):\n save_extensions = [save_extensions]\n # Drop any .'s\n save_extensions = [s.strip('.') for s in save_extensions]\n # Format exclude_dirs for consistency\n if isinstance(exclude_dirs, str):\n exclude_dirs = [exclude_dirs]\n # Get default base name\n if base_name is None:\n experiment_path = os.path.abspath(experiment_dir)\n base_name = [p for p in experiment_path.split('/') if p][-1]\n\n # Full name of the archive name uses a time stamp\n timestamp = time.strftime('%b%d%Y_%H%M%S')\n archive_name = f'{base_name}_{timestamp}'\n\n # Use a temporary folder to create the archive\n tmp_folder = f'/tmp/{str(uuid.uuid4())}'\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n os.makedirs(tmp_folder)\n tmp_experiment = os.path.join(tmp_folder, archive_name)\n os.makedirs(tmp_experiment)\n\n # Recurse through the experiment directory and non-'output' subdirectories,\n # saving files to the temporary folder\n dirs_to_check = [experiment_dir]\n while len(dirs_to_check) > 0:\n # A directory to check (DTC), relative to the experiment_dir\n dtc = dirs_to_check.pop(0)\n # Full path to the DTC\n full_dtc = dtc if dtc == experiment_dir \\\n else os.path.join(experiment_dir, dtc)\n # List of all files and folders in the DTC\n dlist = os.listdir(full_dtc)\n # List of all files in the DTC\n files = [d for d in dlist\n if os.path.isfile(os.path.join(full_dtc, d))]\n # Check each file to see if it should be archived.\n for f in files:\n if f.split('.')[-1] in save_extensions:\n # Recreate the file structure inside experiment_dir, up to\n # the folder containing f\n tmp_save_dir = tmp_experiment if dtc == experiment_dir \\\n else os.path.join(tmp_experiment, dtc)\n os.makedirs(tmp_save_dir, exist_ok=True)\n # Save a copy of f\n shutil.copy2(os.path.join(full_dtc, f), tmp_save_dir)\n\n # Get non-excluded subdirectories\n subdirs = [d for d in dlist\n if os.path.isdir(os.path.join(full_dtc, d))\n and d not in exclude_dirs]\n # Track subdirectories as paths relative to the experiment dir\n if dtc != experiment_dir and len(subdirs) > 0:\n subdirs = [os.path.join(dtc, d) for d in subdirs]\n\n dirs_to_check += subdirs\n\n # At this point, all archivable files and folders are saved in tmp_folder.\n # Create an archive, coincidentally the same name as tmp_experiment's path\n tmp_archive = tmp_experiment[:]\n shutil.make_archive(tmp_archive, archive_format, tmp_folder, archive_name)\n # Get the full name of the archive. 
There should only be one file in\n # tmp_experiment\n tmp_archive_full = [f for f in os.listdir(tmp_folder)\n if os.path.isfile(os.path.join(tmp_folder, f))][0]\n # Copy the archive to its destination\n os.makedirs(dst_dir, exist_ok=True)\n shutil.move(os.path.join(tmp_folder, tmp_archive_full),\n os.path.join(dst_dir, tmp_archive_full),\n copy_function=shutil.copyfile)\n # Remove the temporary folder\n shutil.rmtree(tmp_folder)\n\n pass", "def write_output_files(input_path, output_path, out_data, random = False):\n create_directory_structure(output_path)\n for city in cities:\n # set relevant list\n data_dir = os.path.join(input_path, city, city+'_test')\n sub_files = list_filenames(data_dir)\n for f in sub_files:\n # load data\n outfile = os.path.join(output_path, city, city+'_test',f)\n if random:\n out = np.random.randint(256, size=(5,3,495,436,3), dtype = np.dtype(np.uint8))\n else:\n out = out_data\n write_data(out, outfile)\n print(\"just wrote file {}\".format(outfile))", "def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master", "def create_temp_archive(case_dict):\n # ---------------------------------------------------------------------\n archive_temp_dir = \"{0}/archive_temp_dir\".format(case_dict[\"workdir\"])\n logger.debug(\"create_temp_archive %s\", archive_temp_dir)\n\n if not os.path.exists(archive_temp_dir):\n os.makedirs(archive_temp_dir)\n else:\n logger.info(\n \"ERROR archive_metadata archive_temp_dir already exists. 
exiting...\"\n )\n sys.exit(1)\n\n return archive_temp_dir", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def sample_series_dirs():\n tmp_dir = tempfile.mkdtemp()\n # Extract Series\n os.mkdir(os.path.join(tmp_dir, \"series_dir\"))\n series_dir_series = os.path.join(tmp_dir, \"series_dir\")\n series_zip = os.path.join(ASSETS_DIR, 'series_dir_series.zip')\n with zipfile.ZipFile(series_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_series)\n # Extract Animes\n os.mkdir(os.path.join(tmp_dir, \"anime_dir\"))\n series_dir_anime = os.path.join(tmp_dir, \"anime_dir\")\n anime_zip = os.path.join(ASSETS_DIR, 'series_dir_anime.zip')\n with zipfile.ZipFile(anime_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_anime)\n\n yield [series_dir_series, series_dir_anime]\n shutil.rmtree(tmp_dir)", "def test_create_daily_archives_non_daily_operator_files(self, *args):\n start_date = DateHelper().this_month_start\n\n file_path = \"path\"\n\n context = {\"version\": \"1\"}\n expected = [file_path]\n result = create_daily_archives(\n 1, \"10001\", self.ocp_provider_uuid, \"file\", \"path\", self.ocp_manifest_id, start_date, context=context\n )\n self.assertEqual(result, expected)", "def _test_path(self, request, artifact_dir):\n self.test_path = artifact_dir / request.module.__name__ / request.node.name\n self.test_path.mkdir(parents=True, exist_ok=True)\n self.export_path = self.test_path / \"sample_processed.nii.gz\"", "def _generate_examples(self, archive):\n\n for fname, fobj in archive:\n image_dir, image_file = os.path.split(fname)\n d = os.path.basename(image_dir)\n record = {'image': fobj, 'label': d}\n yield \"%s/%s\" % (image_file, d), record", "def test_RandomDatasetGenerator_SampleZip(temp_dir: pathlib.Path):\n generator = make_dataset.RandomDatasetGenerator(\n start_time_seconds_since_epoch=time.mktime(\n time.strptime(\"1/1/2018\", \"%m/%d/%Y\")\n ),\n locations=[\"My House\", \"The Office\", \"A Restaurant\",],\n names=[\n \"Work\",\n \"Home\",\n \"Sleep\",\n \"Fun\",\n \"Commute to work\",\n \"Commute to home\",\n ],\n )\n\n generator.SampleZip(temp_dir / \"LC_export.zip\", 100)\n\n with zipfile.ZipFile(temp_dir / \"LC_export.zip\") as z:\n with z.open(\"LC_export.csv\") as f:\n # Read and decode the compressed CSV into a string.\n string = f.read().decode(\"utf-8\")\n reader = csv.reader(string.split(\"\\n\"))\n rows = [row for row in reader]\n\n # One line for the header.\n assert len(rows) == 103\n\n # All lines except the second and last have eight columns.\n assert len(rows[0]) == 8\n for row in rows[2:-1]:\n assert len(row) == 8", "def _clean_up_temporary_files(dataset_dir):\n return", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)", "def 
generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip':\n archive = ZipTarWrapper(location.name, 'w', zipfile.ZIP_DEFLATED)\n else:\n write_type = \"w\"\n if compression:\n write_type = \"w|{0}\".format(compression)\n archive = tarfile.open(location.name, write_type)\n\n # Add all the things to the archive\n for path_spec in paths:\n path_spec.add_to_tar(archive, environment)\n\n # Finish the zip\n archive.close()\n\n return archive", "def create_test_input_files(input1, input2):\n random.shuffle(input1)\n random.shuffle(input2)\n filename1 = application.join_abs_path(EMPTY_TEST_DIR, 'file-1.gz')\n filename2 = application.join_abs_path(EMPTY_TEST_DIR, 'file-2.gz')\n\n with gzip.open(filename1, 'wb') as file1:\n file1.write('\\n'.join(input1))\n with gzip.open(filename2, 'wb') as file2:\n file2.write('\\n'.join(input2))", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def datadir(tmpdir, request):\n filename = request.module.__file__\n test_dir, _ = os.path.splitext(filename)\n\n if os.path.isdir(test_dir):\n dir_util.copy_tree(test_dir, str(tmpdir))\n\n return tmpdir", "def tmp_dir(data_dir):\n tmp_dir = os.path.join(data_dir, 'manorm_tmp_output')\n yield tmp_dir\n shutil.rmtree(tmp_dir)", "def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))", "def test_meta_analysis(self):\n # run a meta-analysis\n ids = ['study1', 'study3']\n ma = meta.MetaAnalysis(self.dataset, ids)\n # save the results\n tempdir = tempfile.mkdtemp()\n ma.save_results(tempdir + os.path.sep, prefix='test')\n from glob import glob\n files = glob(tempdir + os.path.sep + \"test_*.nii.gz\")\n self.assertEquals(len(files), 9)\n shutil.rmtree(tempdir)", "def setup(zip_path, dest_path):\n\n #makes folder for zip files\n make_directory(zip_path)\n\n #makes folder for processed data\n make_directory(dest_path)", "def test_make_final_path_directory(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n final_path = archive.make_final_path(directory='test-directory')\n valid_path = os.path.join(archive.data_dir_path, 'test-directory')\n self.assertEqual(final_path, valid_path)", "def create_archive(filelist):\n\t\n\n\ttmp = tempfile.NamedTemporaryFile()\n\t# with tempfile.SpooledTemporaryFile() as tmp:\n\twith zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\tarcname = './docs/'\n\t\tfor x in filelist:\n\t\t\tfilename = os.path.basename(x[1])\n\t\t\t_file = x[0]\n\t\t\t# make sure we're at the start...\n\t\t\t_file.seek(0)\n\t\t\tarchive.write(_file.name, arcname=os.path.join(arcname, filename))\n\n\t# Reset file pointer\n\ttmp.seek(0)\n\n\treturn tmp\n\n\t\t# Write file data to response\n\t\t# return HttpResponse(tmp.read(), content_type='application/x-zip-compressed')", "def zip_output(directory):\n #directory = client_variables.output_zip_folder\n #create the zip archive\n zip = 
zipfile.ZipFile('outputs.zip', 'w')\n\n # add all files in specified folder\n for name in glob.glob(directory + '\\\\*'):\n zip.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)\n zip.close()", "def _generate_data(self, codec='deflate'):\n _logger.info('generating fake data')\n (desc, path) = mkstemp()\n os.close(desc)\n os.remove(path)\n try:\n call([\n 'node', osp.join(DPATH, os.pardir, os.pardir, 'scripts', 'random'),\n self.path, str(self.n_records), path\n ])\n yield path\n finally:\n if osp.exists(path):\n os.remove(path)", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def _create_files_from_template(\r\n self,\r\n *,\r\n data_dir: str,\r\n is_first_submission: Optional[bool] = None,\r\n ):\r\n\r\n for template_type in TemplateType:\r\n\r\n # Do not re-create the submission_metadata file if it already\r\n # exists for other submission(s) for this benchmark\r\n if (\r\n template_type == TemplateType.METADATA\r\n and is_first_submission is not None\r\n and is_first_submission == False\r\n ):\r\n continue\r\n\r\n template_module = importlib.import_module(\r\n f\"mcs_benchmark_data.cli.template_contexts.{template_type.value}_template\"\r\n )\r\n TemplateDataclass = getattr(\r\n template_module, f\"{template_type.value.capitalize()}Template\"\r\n )\r\n\r\n template_metadata = TemplateDataclass(\r\n benchmark_name=self.benchmark_name, submission_name=self.submission_name\r\n )\r\n\r\n # Update the template for the benchmark/submission provided\r\n template_metadata.execute(root_path=self.root_path, data_dir=data_dir)\r\n\r\n self._logger.info(\r\n \"A %s file has been created at %s\",\r\n template_metadata.__class__.__name__,\r\n self.root_path / template_metadata.dest_file_path_from_root,\r\n )\r\n\r\n if template_type == TemplateType.METADATA:\r\n\r\n self._logger.info(\r\n \"A %s file has been created at %s\",\r\n template_type.value,\r\n self.root_path\r\n / f\"test_{str(template_metadata.dest_file_path_from_root)}\",\r\n )", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def zip_to_test(prepend_path=\"\"):\n module_path = 'test_data/example_module'\n with tempfile.TemporaryDirectory(prefix='zipload-py-test') as tmp_dir:\n zip_path = os.path.join(tmp_dir, 'test.zip')\n with zipfile.ZipFile(zip_path, 'w') as created_zip:\n for root, _, files in os.walk(module_path):\n for file in files:\n created_zip.write(os.path.join(root, prepend_path, file))\n yield zip_path", "def create_tarballs_from_xml_files_in_folder(xml_dir, download_date=\"2017.11.02\"):\n\n xml_list = glob.glob(os.path.join(xml_dir, \"*.xml\"))\n\n for xml in xml_list:\n xml_renamed = xml[:-4] + \".BLAST.xml\"\n xml_tar_gz = xml[:-4] + \".BLAST.xml.tar.gz\"\n xml_txt = xml[:-4] + \"_details.txt\"\n # xml_txt = xml[:-4] + \".BLAST_details.txt\"\n\n if not os.path.isfile(xml_tar_gz):\n copyfile(xml, xml_renamed)\n acc = os.path.basename(xml).split(\".\")[0]\n sys.stdout.write(\"{}, \".format(acc))\n sys.stdout.flush()\n # create an empty text file with the download date\n date = strftime(\"%Y%m%d\")\n with open(xml_txt, \"w\") as f:\n f.write(\"acc\\t{}\\ndownload_date\\t{}\\ndatabase\\tncbi_nr\\ne_value\\t1\\n\".format(acc, download_date))\n\n with tarfile.open(xml_tar_gz, mode='w:gz') as tar:\n # add the files to the compressed tarfile\n tar.add(xml_renamed, arcname=os.path.basename(xml_renamed))\n tar.add(xml_txt, arcname=os.path.basename(xml_txt))\n\n # 
delete the original files\n try:\n os.remove(xml_renamed)\n os.remove(xml_txt)\n except:\n sys.stdout.write(\"{} could not be deleted\".format(xml_renamed))", "def _make_tar_gz_file(output_filename, source_dir):\n with tarfile.open(output_filename, \"w:gz\") as tar:\n for f in os.listdir(source_dir):\n tar.add(os.path.join(source_dir, f), arcname=f)", "def _create_zip_file(self, dest, paths):\n with zipfile.ZipFile(dest, 'w') as zip_file:\n for path in paths:\n zip_file.write(path, os.path.basename(path))", "def test_save_directory_check(shipping_group, tmpdir):\n\n outfile = str(tmpdir.mkdir(\"out\"))\n\n with pytest.raises(UsageError):\n shipping_group.save(outfile, 'hdf5')", "def setUp(self):\n self.tmpdir = mkdtemp()", "def zipped_tarball(this_tmp_dir):\n tgz_name = \"%s.tar.gz\" % this_tmp_dir\n\n tar = tarfile.open(tgz_name, \"w:gz\")\n\n tar.add(this_tmp_dir)\n\n tar.close()\n\n return tgz_name", "def generate(number, output):\n output = os.path.abspath(output)\n\n if os.path.exists(output):\n if len(os.listdir(output)) > 0:\n raise click.FileError(\n output, hint='folder exists and is not empty.')\n else:\n os.makedirs(output)\n\n padding = len(str(number))\n template = '{i:0%dd}.zip' % padding\n for i in range(number):\n archive_name = template.format(i=i)\n click.echo(f'Generating archive: {archive_name}')\n filename = join(output, archive_name)\n archive = RandomArchive(filename)\n try:\n archive.build()\n except FileExistsError:\n click.echo(f'Warning! Archive already exists: {filename}')\n except Exception as e:\n click.echo(f'Unexpected error: {str(e)}')\n raise click.Abort(1)\n\n click.echo(f'Archives generated: {output}')", "def test_compress(self):\n self.logger.info(\"STEP: Create the workspace directory to be compressed.\")\n workspace = Workspace(Mock)\n directory = Path.cwd().joinpath(\"workspace\")\n directory.mkdir()\n workspace.workspace = directory\n\n # Create a file to verify compression.\n directory.joinpath(\"file.txt\").touch()\n\n test_folder = Path.cwd().joinpath(\"testfolder\")\n test_folder.mkdir()\n self.items.append(test_folder)\n\n self.logger.info(\"STEP: Compress the directory.\")\n workspace.compress()\n\n self.logger.info(\n \"STEP: Verify that the directory was compressed using the gztar format.\"\n )\n self.items.append(test_folder)\n compressed_workspace = Path.cwd().joinpath(\"workspace.tar.gz\")\n unpack_archive(compressed_workspace, test_folder, format=\"gztar\")\n compressed_file = test_folder.joinpath(\"workspace/file.txt\")\n self.assertTrue(compressed_file.exists() and compressed_file.is_file())", "def write_tarball(args, tarfilename, archivefiles=[]):\n if not archivefiles:\n return None\n \n manifest_filename, manifest_uuid = render_manifest(args, archivefiles)\n try:\n with tarfile.open(tarfilename, f\"{FILE_FLAG}:gz\") as tarball:\n file_count = 0\n for fname in archivefiles:\n LOG.debug(f\"Adding {fname} to {tarfilename}: \")\n if fname.endswith(\".csv\"):\n upload_name = f\"{manifest_uuid}_openshift_usage_report.{file_count}.csv\"\n tarball.add(fname, arcname=upload_name)\n file_count += 1\n tarball.add(manifest_filename, arcname=\"manifest.json\")\n except FileExistsError as exc:\n LOG.critical(exc)\n sys.exit(2)\n LOG.info(f\"Wrote: {tarfilename}\")\n return f\"{tarfilename}\"", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n 
test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def test_create4_dir(self):\n TempfileManager.sequential_files(2)\n fname = TempfileManager.create_tempdir()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertEqual(fname, 'tmp2')\n #\n TempfileManager.unique_files()\n fname = TempfileManager.create_tempdir()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 2)\n fname = os.path.basename(fname)\n self.assertNotEqual(fname, 'tmp3')\n self.assertTrue(fname.startswith('tmp'))", "def archive_files(output_dir, output_dir_name):\n\n file_name = output_dir + \".tar.gz\"\n\n logger.info(\"Archiving files into %s\", file_name)\n with tarfile.open(file_name, \"w|gz\") as tar:\n tar.add(output_dir, arcname=output_dir_name)\n logger.info(\"Archived files into %s\", file_name)\n\n try:\n shutil.rmtree(output_dir)\n except OSError as ex:\n logger.warning(\"Failed to delete directory after archiving: %s\", ex)", "def write_test_data(output_dir):\r\n test_data = get_test_data()\r\n for k, v in test_data.items():\r\n f = open(join(output_dir, k), 'w')\r\n f.write('\\n'.join(v))\r\n f.close()", "def fixture_project_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"data\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))", "def test_create3_dir(self):\n fname = TempfileManager.create_tempdir(suffix='bar')\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.endswith('bar'))", "def autogen_dataset_dir():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',')", "def mp_tmpdir():\n # shutil.rmtree(TEMP_DIR, ignore_errors=True)\n os.makedirs(TEMP_DIR)\n yield TEMP_DIR\n shutil.rmtree(TEMP_DIR, ignore_errors=True)", "def dict_to_files(result_dict, dest_dir):\n tempdir = tempfile.mkdtemp()\n\n # timestamp every generated dignostic file\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%I%S\")\n tarball = '{0}/ceph-collect_{1}.tar.gz'.format(dest_dir, timestamp)\n\n with tarfile.open(tarball, 'w:gz') as tar:\n for filename, content in result_dict.items():\n\n for contentname, contentdata in content.items():\n tmpfile = '{0}/{1}'.format(tempdir, filename +\n \"-\" + contentname)\n\n LOGGER.debug('Writing file %s', tmpfile)\n print('Writing file %s', tmpfile)\n with open(tmpfile, 'wb') as f:\n f.write(contentdata)\n f.close()\n\n tar.add(name=tmpfile,\n arcname='ceph-collect_{0}/{1}'.format(timestamp,\n filename + \"-\" +\n contentname))\n tar.close()\n LOGGER.info(\"Diagnostics are written to : \"+ tarball)\n\n LOGGER.info(\"Cleaning up temporary directory\")\n shutil.rmtree(tempdir)", "def convert_testing_data(mfccPath):\n inputlist, inputnamelist = ark_parser(mfccPath, 'test.ark')\n\n print(\"%d sample in testing set\" % len(inputlist))\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n \n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamelist, test_name)", "def setUpClass(self):\n\n # Create a new temporary folder\n self.dir = tempfile.mkdtemp(self.__name__, 
dir=os.getcwd()) # this folder should be on a hadoop file system\n\n if self.dir.startswith(\"/gpfs/\"):\n # remove /gpfs/ because on sdil hadoop-path starts at \"/smartdata/\"\n self.dir = \"/\" + self.dir.split(\"/\", 2)[2]\n \n dirhash._logger.info(\"writing test data to folder \\\"%s\\\"\", self.dir)\n \n # create some sub-folder\n os.makedirs(self.dir + \"/dir/subdir1\")\n os.makedirs(self.dir + \"/dir/subdir2\")\n os.makedirs(self.dir + \"/dir/subdir3\")\n os.makedirs(self.dir + \"/dir/emptysubdir\")\n dirhash._logger.info(\"created subdirectories\")\n \n # Create a lorem ipsum file\n with open(self.dir + \"/\" + self.LOREM_IPSUM_PATH, \"w\") as f:\n f.write(self.LOREM_IPSUM_TEXT)\n \n # Create an HTML file\n with open(self.dir + \"/\" + self.HELLO_WORLD_HTML_PATH, \"w\") as f:\n f.write(self.HELLO_WORLD_HTML_TEXT)\n \n # Create a file holding typical passwords\n with open(self.dir + \"/\" + self.PASSWORDS_PATH, \"w\") as f:\n f.write(self.PASSWORDS_TEXT)\n \n # just a few characters\n with open(self.dir + \"/\" + self.ABC_PATH, \"w\") as f:\n f.write(self.ABC_TEXT)\n \n # an empty file\n with open(self.dir + \"/\" + self.EMPTY_FILE_PATH, \"w\") as f:\n f.write(\"\")\n \n # create a file of 32 * 1024 * 1024 zero bytes\n with open(self.dir + \"/\" + self.MANY_ZEROS_PATH, \"wb\") as f:\n f.write(b\"\\0\" * (32 * 1024 * 1024))\n \n # create a spark context for execution, that will run all jobs locally\n self._context = SparkContext(appName=\"dirhash_test\")\n self._context.addPyFile('dirhash.py')", "def KittiTestDataset(test_root_path):\n \n names = os.listdir(test_root_path)\n dataset = [[os.path.join(test_root_path, name)] for name in names]\n \n return dataset", "def gen_folders(rho, kappa, km, pa, analysis, dbase, analysisdbase):\n \n path1 = 'density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa)\n path2 = analysis + '_density_' + + str(rho) + \"_kappa_\" + \\\n str(kappa) + \"_km_\" + str(km) + \"_panti_\" + str(pa) + '.txt' \n datafolder = dbase + path1 + '/'\n analysisfile = analysisdbase + path2 \n\n return datafolder, analysisfile", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def generate_test_data(obj, name):\n with open('tests/{}'.format(name), 'wb') as f:\n pickle.dump(obj, f)", "def test_make_final_path_no_kwargs(self):\n with DataArchive(self.user, DATA_DOWNLOADS_WORKING_DIR) as archive:\n final_path = archive.make_final_path()\n self.assertEqual(final_path, archive.data_dir_path)", "def create_test_folder(df_test, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/test')\n print(f'Create test set at: {folder_path}')\n for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 
'test', row['filename'])\n shutil.copy(img, destination_path )", "def setUp(self):\n self.outdir = \"tests/out/pdftotext\"\n if not os.path.exists(self.outdir):\n os.makedirs(self.outdir)\n else:\n files = glob.glob(self.outdir)\n for f in files:\n if os.path.isfile(f):\n os.remove(f)", "def mkdtemp(suffix='',prefix='tmp',dir=None):\n\tpass", "def generateTestset(testSets):\n\ttestFiles = []\n\tfor testSet in testSets:\n\t\tif testSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid test set: \" + testSet)\n\n\t\ttestFiles += sorted(map(lambda x: _dataSets[testSet] + \"test/\" + x,\n\t\t\t\t\t\t\tfilter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[testSet] + \"test/\"))))\n\n\treturn testFiles", "def _expand_archive(self, name):\r\n target = path(self.temp_dir) / uuid.uuid4().hex\r\n os.mkdir(target)\r\n with tarfile.open(self.data_dir / name) as tar_file:\r\n tar_file.extractall(path=target)\r\n\r\n return target", "def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()", "def generate_data_set(args):\n if args.o is None:\n filename = \"test\"\n else:\n filename = args.o\n\n if args.b is None:\n args.b = 0.5\n if args.p is None:\n path = Path(\"\")\n else:\n path = Path(args.p)\n if args.nf is None:\n args.nf = 1\n\n for i in range(int(args.nf)):\n args.o = path / (filename + \"_t\"+str(args.t) + \"_k\" + str(args.k) + \"_n\"+str(args.n) + \"_m\"+str(args.m)\n + \"_c\"+str(args.c) + \"_\"+str(i) + \".dzn\")\n create_dnz_file(args)", "def setUp(self):\n self.test_root = tempfile.mkdtemp(dir=tmpdir)\n self.test_input = os.path.join(self.test_root, 'input')\n self.test_output = os.path.join(self.test_root, 'output')\n self.test_output_tree = os.path.join(self.test_output, 'tree')\n self.test_output_meta = os.path.join(self.test_output_tree, 'meta.js')\n self.test_output_toc = os.path.join(self.test_output_tree, 'toc.js')\n\n os.makedirs(self.test_input, exist_ok=True)\n os.makedirs(self.test_output, exist_ok=True)", "def clean_test_files(dest_dir):\n\n print 'Cleaning data files'\n folders = [os.path.join(dest_dir, 'testdata'),\n os.path.join(dest_dir, 'logs')]\n for the_folder in folders:\n if os.path.isdir(the_folder):\n for the_file in os.listdir(the_folder):\n file_path = os.path.join(the_folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except IOError, exception:\n print exception\n for the_folder in folders:\n if not os.path.isdir(the_folder):\n try:\n os.makedirs(the_folder)\n except OSError:\n print 'ERROR Could not create directory structure for tests.'", "def make_test_dataset(outdir, overwrite=False,\n observatory_name='HESS', n_obs=10,\n az_range=Angle([0, 360], 'deg'),\n alt_range=Angle([45, 90], 'deg'),\n date_range=(Time('2010-01-01'),\n Time('2015-01-01')),\n n_tels_range=(3, 4),\n sigma=Angle(5., 'deg'),\n spectral_index=2.7,\n random_state='random-seed'):\n from ..data import DataStore\n random_state = get_random_state(random_state)\n\n # create output folder\n Path(outdir).mkdir(exist_ok=overwrite)\n\n # generate observation table\n observation_table = make_test_observation_table(observatory_name=observatory_name,\n n_obs=n_obs,\n az_range=az_range,\n alt_range=alt_range,\n 
date_range=date_range,\n use_abs_time=False,\n n_tels_range=n_tels_range,\n random_state=random_state)\n\n # save observation list to disk\n outfile = Path(outdir) / 'runinfo.fits'\n observation_table.write(str(outfile))\n\n # create data store for the organization of the files\n # using H.E.S.S.-like dir/file naming scheme\n if observatory_name == 'HESS':\n scheme = 'HESS'\n else:\n s_error = \"Warning! Storage scheme for {}\".format(observatory_name)\n s_error += \"not implemented. Only H.E.S.S. scheme is available.\"\n raise ValueError(s_error)\n\n data_store = DataStore(dir=outdir, scheme=scheme)\n\n # loop over observations\n for obs_id in observation_table['OBS_ID']:\n event_list, aeff_hdu = make_test_eventlist(observation_table=observation_table,\n obs_id=obs_id,\n sigma=sigma,\n spectral_index=spectral_index,\n random_state=random_state)\n\n # save event list and effective area table to disk\n outfile = data_store.filename(obs_id, filetype='events')\n outfile_split = outfile.rsplit(\"/\", 1)\n os.makedirs(outfile_split[0]) # recursively\n event_list.write(outfile)\n outfile = data_store.filename(obs_id, filetype='effective area')\n aeff_hdu.writeto(outfile)", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)", "def setup():\n print('...')\n # Make sure dirs exist\n for directory in [DATA_DIR, DATA_INPUT_DIR, DATA_OUTPUT_DIR]:\n os.makedirs(directory, exist_ok=True)", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def test_create1a_dir(self):\n fname = TempfileManager.create_tempdir(dir=tempdir)\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.startswith('tmp'))", "def setUp(self):\n # Generates directory names\n self.tempdir = tempfile.mkdtemp()\n self.subdir = os.path.join(self.tempdir, \"dir\")\n self.emptydir = os.path.join(self.tempdir, \"empty\")\n # Populates directories\n os.makedirs(self.subdir)\n os.makedirs(self.emptydir)\n # Populates files\n self.root_fcount = 3\n self.nest_fcount = 5\n for i in range(0, self.root_fcount):\n with open(os.path.join(self.tempdir, \"%i.txt\" % i), \"w+\") as f:\n f.write(\"Test.\")\n for i in range(0, self.nest_fcount):\n with 
open(os.path.join(self.subdir, \"%i.txt\" % i), \"w+\") as f:\n f.write(\"Test.\")\n self.filename = os.path.join(self.subdir, \"nontxt.mp3\")\n with open(self.filename, \"w+\") as f:\n f.write(\"Test.\")", "def testArchiveExport(self):\n\n archive = alembic.Abc.OArchive(\"iterator.abc\")\n for i in range(3):\n child = alembic.Abc.OObject(archive.getTop(), \"childObj\" + str(i))\n for j in range(3):\n gchild = alembic.Abc.OObject(child, \"grandChild\" + str(j))\n for k in range(3):\n cp = alembic.Abc.OCompoundProperty(gchild.getProperties(), \"prop\" + str(k))\n sp = alembic.Abc.OStringProperty(cp, \"scalar\")\n sp.setValue(\"a\")\n sp.setValue(\"b\")\n sp.setValue(\"c\")\n ap = alembic.Abc.OStringArrayProperty(cp, \"array\")\n stra = imath.StringArray(3)\n stra[0] = 'a'\n stra[1] = 'b'\n stra[2] = 'c'\n ap.setValue(stra)\n strb = imath.StringArray(2)\n strb[0] = 'd'\n strb[1] = 'e'\n ap.setValue(strb)\n strc = imath.StringArray(1)\n strc[0] = 'f'\n ap.setValue(strc)", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def compress_datasets(directory_path: str, holdout: float) -> None:\n\n dataset_path = Path(directory_path)\n sar_sets = get_sar_paths(directory_path)\n make_directory_dataset(directory_path)\n divide_sar_files(dataset_path, sar_sets, holdout)\n remove_subdirectories(directory_path)", "def test_create_package_dir(self):\n tempdir = tempfile.mkdtemp()\n os.rmdir(tempdir)\n settings = {\n 'storage.dir': tempdir,\n }\n FileStorage.configure(settings)\n try:\n self.assertTrue(os.path.exists(tempdir))\n finally:\n os.rmdir(tempdir)", "def test_8_archive(self):\n\n # Setup a repository with packages from multiple publishers.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n t2_amber10 = self.pkgsend_bulk(self.durl3, amber)[0]\n self.pkgrecv(self.durl1, \"-d {0} [email protected] [email protected]\".format(\n self.durl3))\n\n # Now attempt to receive from a repository to a package archive.\n arc_path = os.path.join(self.test_root, \"test.p5p\")\n self.pkgrecv(self.durl3, \"-a -d {0} \\*\".format(arc_path))\n\n #\n # Verify that the archive can be opened and the expected\n # packages are inside.\n #\n amber10 = self.published[0]\n bronze10 = self.published[2]\n arc = p5p.Archive(arc_path, mode=\"r\")\n\n # Check for expected publishers.\n expected = set([\"test1\", \"test2\"])\n pubs = set(p.prefix for p in arc.get_publishers())\n self.assertEqualDiff(expected, pubs)\n\n # Check for expected package FMRIs.\n expected = set([amber10, t2_amber10, bronze10])\n tmpdir = tempfile.mkdtemp(dir=self.test_root)\n returned = []\n for pfx 
in pubs:\n catdir = os.path.join(tmpdir, pfx)\n os.mkdir(catdir)\n for part in (\"catalog.attrs\", \"catalog.base.C\"):\n arc.extract_catalog1(part, catdir, pfx)\n\n cat = catalog.Catalog(meta_root=catdir, read_only=True)\n returned.extend(str(f) for f in cat.fmris())\n self.assertEqualDiff(expected, set(returned))\n arc.close()\n shutil.rmtree(tmpdir)\n\n #\n # Verify that packages can be received from an archive to an\n # archive.\n #\n arc2_path = os.path.join(self.test_root, \"test2.p5p\")\n self.pkgrecv(arc_path, \"-a -d {0} pkg://test2/amber\".format(arc2_path))\n\n # Check for expected publishers.\n arc = p5p.Archive(arc2_path, mode=\"r\")\n expected = set([\"test2\"])\n pubs = set(p.prefix for p in arc.get_publishers())\n self.assertEqualDiff(expected, pubs)\n\n # Check for expected package FMRIs.\n expected = set([t2_amber10])\n tmpdir = tempfile.mkdtemp(dir=self.test_root)\n returned = []\n for pfx in pubs:\n catdir = os.path.join(tmpdir, pfx)\n os.mkdir(catdir)\n for part in (\"catalog.attrs\", \"catalog.base.C\"):\n arc.extract_catalog1(part, catdir, pfx)\n\n cat = catalog.Catalog(meta_root=catdir, read_only=True)\n returned.extend(str(f) for f in cat.fmris())\n self.assertEqualDiff(expected, set(returned))\n arc.close()\n\n #\n # Verify that pkgrecv gracefully fails if archive already\n # exists.\n #\n self.pkgrecv(arc_path, \"-d {0} \\*\".format(arc2_path), exit=1)\n\n #\n # Verify that packages can be received from an archive to\n # a repository.\n #\n self.pkgrecv(arc_path, \"--newest\")\n self.pkgrecv(arc_path, \"-d {0} pkg://test2/amber bronze\".format(\n self.durl4))\n self.wait_repo(self.dcs[4].get_repodir())\n repo = self.dcs[4].get_repo()\n self.pkgrecv(repo.root, \"--newest\")\n\n # Check for expected publishers.\n expected = set([\"test1\", \"test2\"])\n pubs = repo.publishers\n self.assertEqualDiff(expected, pubs)\n\n # Check for expected package FMRIs.\n expected = sorted([t2_amber10, bronze10])\n returned = []\n for pfx in repo.publishers:\n cat = repo.get_catalog(pub=pfx)\n returned.extend(str(f) for f in cat.fmris())\n self.assertEqualDiff(expected, sorted(returned))\n\n # Attempt a dry-run to receive a package archive.\n # We should not have the archive created in this case.\n arc_path = os.path.join(self.test_root, \"dry-run.p5p\")\n self.pkgrecv(self.durl3, \"-n -a -d {0} \\*\".format(arc_path))\n self.assertFalse(os.path.exists(arc_path))", "def _generate_examples(self, data_dir=None, archive=None, label_path=None, process=None):\n if process == 'train':\n abnormal_train = pd.read_csv(os.path.join(data_dir, \"train-abnormal.csv\"), names=[\"Patient_Number\", \"abnormal\"])\n ACL_train = pd.read_csv(os.path.join(data_dir, \"train-acl.csv\"), names=[\"Patient_Number\", \"ACL\"])\n meniscus_train = pd.read_csv(os.path.join(data_dir, \"train-meniscus.csv\"), names=[\"Patient_Number\", \"meniscus\"])\n abnormal_acl_train = abnormal_train.merge(ACL_train, on=\"Patient_Number\")\n ab_acl_meni_train = abnormal_acl_train.merge(meniscus_train, on=\"Patient_Number\")\n else:\n abnormal_test = pd.read_csv(os.path.join(data_dir, \"valid-abnormal.csv\"), names=[\"Patient_Number\", \"abnormal\"])\n ACL_test = pd.read_csv(os.path.join(data_dir, \"valid-acl.csv\"), names=[\"Patient_Number\", \"ACL\"])\n meniscus_test = pd.read_csv(os.path.join(data_dir, \"valid-meniscus.csv\"), names=[\"Patient_Number\", \"meniscus\"])\n abnormal_acl_test = abnormal_test.merge(ACL_test, on=\"Patient_Number\")\n ab_acl_meni_test = abnormal_acl_test.merge(meniscus_test, 
on=\"Patient_Number\")\n\n def sumup(row):\n if (row['abnormal'] == 1) & (row[\"ACL\"] == 1) & (row[\"meniscus\"] == 1):\n val = \"Both_ACL_Meniscus\"\n elif (row['abnormal'] == 1) & (row[\"ACL\"] == 1) & (row[\"meniscus\"] == 0):\n val = \"ACL\"\n elif (row['abnormal'] == 1) & (row[\"ACL\"] == 0) & (row[\"meniscus\"] == 1):\n val = \"Meniscus\"\n elif (row['abnormal'] == 1) & (row[\"ACL\"] == 0) & (row[\"meniscus\"] == 0):\n val = \"abnormal\"\n elif row['abnormal'] == 0:\n val = \"normal\"\n return val\n if process == 'train':\n ab_acl_meni_train['sumup'] = ab_acl_meni_train.apply(sumup, axis=1)\n ab_acl_meni_train[['Patient_Number', \"sumup\"]].to_csv(os.path.join(data_dir, \"train_labels.csv\"), header=False)\n else:\n ab_acl_meni_test['sumup'] = ab_acl_meni_test.apply(sumup, axis=1)\n ab_acl_meni_test[['Patient_Number', \"sumup\"]].to_csv(os.path.join(data_dir, \"valid_labels.csv\"), header=False)\n\n all_labels = pd.read_csv(label_path, names=[\"Patient_Number\", \"label\"])\n\n count = 0\n archive_list = [os.path.join(archive, \"axial\"), os.path.join(archive, \"coronal\"), os.path.join(archive, \"sagittal\")]\n for direction in archive_list:\n files = []\n for r, d, f in tf.io.gfile.walk(direction):\n for file in f:\n if '.npy' in file:\n files.append(os.path.join(r, file))\n sorted_files = sorted(files)\n\n for path_cube in sorted_files:\n cube = np.load(tf.io.gfile.GFile(path_cube, mode='rb'))\n for picture in cube:\n example_image = picture[..., np.newaxis]\n example_label = (all_labels.loc[all_labels['Patient_Number'] ==\n int(os.path.splitext(os.path.basename(path_cube))[0][:4]),\n 'label'].iloc[0])\n count += 1\n yield count, {\n 'image': example_image,\n 'label': example_label,\n }", "def test_add3_dir(self):\n os.mkdir(tempdir + 'add3')\n TempfileManager.add_tempfile(tempdir + 'add3')", "def testdir(contents=None, suffix=\"\"):\n\n if contents is not None:\n contents = [op.join(*c.split('/')) for c in contents]\n\n class ctx(object):\n\n def __init__(self, contents):\n self.contents = contents\n\n def __enter__(self):\n\n self.testdir = tempfile.mkdtemp(suffix=suffix)\n self.prevdir = os.getcwd()\n\n os.chdir(self.testdir)\n\n if self.contents is not None:\n contents = [op.join(self.testdir, c) for c in self.contents]\n make_dummy_files(contents)\n\n return self.testdir\n\n def __exit__(self, *a, **kwa):\n os.chdir(self.prevdir)\n shutil.rmtree(self.testdir)\n\n return ctx(contents)", "def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def make_archive(fname_archive: str, \n sim_epoch: rebound.Simulation, \n object_names: List[str],\n epoch: datetime, dt0: datetime, dt1: datetime, \n time_step: int, save_step: int = 1,\n save_elements: bool = False,\n progbar: bool = False) -> rebound.SimulationArchive:\n try:\n # First try to load the named archive\n sa = rebound.SimulationArchive(filename=fname_archive)\n except:\n # If the archive is not on disk, save it to disk\n print(f'Generating archive {fname_archive}\\n'\n f'from {dt0} to {dt1}, time_step={time_step}, save_step={save_step}...')\n make_archive_impl(fname_archive=fname_archive, sim_epoch=sim_epoch, object_names=object_names,\n epoch=epoch, dt0=dt0, dt1=dt1, \n time_step=time_step, save_step=save_step, \n 
save_elements=save_elements, progbar=progbar)\n # Load the new archive into memory\n sa = rebound.SimulationArchive(filename=fname_archive)\n return sa", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def test_create3(self):\n fname = TempfileManager.create_tempfile(suffix='bar')\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertTrue(fname.endswith('bar'))" ]
[ "0.7358341", "0.6791461", "0.6418583", "0.63508826", "0.63468665", "0.6301004", "0.6291766", "0.61660707", "0.6096657", "0.6091523", "0.60853094", "0.6035293", "0.602757", "0.59755576", "0.595753", "0.59322333", "0.5913556", "0.5871974", "0.58634", "0.5859327", "0.58383375", "0.579577", "0.5780835", "0.5780175", "0.57778054", "0.57703567", "0.576039", "0.57591206", "0.5756717", "0.5731769", "0.5729714", "0.57258564", "0.5723285", "0.571901", "0.5706833", "0.5704778", "0.5694077", "0.5691649", "0.56885916", "0.5681837", "0.56614715", "0.56555307", "0.5619951", "0.5601932", "0.5595516", "0.5594", "0.5585194", "0.5581556", "0.5579792", "0.55749184", "0.55723876", "0.55714875", "0.5570977", "0.5561753", "0.5561029", "0.55533195", "0.5547729", "0.55454725", "0.55447626", "0.5540238", "0.5538297", "0.5535639", "0.55304176", "0.5527924", "0.55194557", "0.5513577", "0.5511589", "0.55091", "0.55052185", "0.55042183", "0.5503146", "0.5498506", "0.5498012", "0.5489352", "0.54844594", "0.54814047", "0.5478563", "0.54662704", "0.5460224", "0.54572785", "0.54470694", "0.54430217", "0.544077", "0.543943", "0.5437908", "0.5436288", "0.54289925", "0.54272014", "0.54233754", "0.5423262", "0.54121315", "0.5402117", "0.54018855", "0.540002", "0.53955245", "0.5390976", "0.5383573", "0.5377977", "0.53768826", "0.5376266" ]
0.75224614
0
Generate a test environment using the given dataset. The settings are temporarily overwritten to use the test data.
def generate_test_environment(tmpdir, dataset): # Overwrite settings with test settings generate_test_settings(tmpdir, dataset) # Generate the archive files for usage in ['train', 'test']: for dstype in ['images', 'labels']: dataset_type = usage + '.' + dstype mnist_dataset = 'datasets.mnist.' + dataset_type filepath = get_setting(mnist_dataset) test_dataset = dataset + '.' + dataset_type generate_test_dataset_archive(filepath, test_dataset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_environment(dataset, tmpdir):\n\n print(\">>> Test environment:\")\n print(\"dataset:\", dataset)\n print(\"tmpdir:\", tmpdir)\n\n generate_test_environment(tmpdir, dataset)\n\n return { 'dataset': dataset, 'tmpdir': tmpdir }", "def test_generate_test_environment(dataset):\n\n print(\"## =========================================================\")\n print(\"## Dataset:\", dataset)\n print(\"## ---------------------------------------------------------\")\n print(\"\")\n\n tmpdir = \"/tmp/collagen\"\n\n generate_test_environment(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n \n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Unpack\n print(\"\")\n print(\"{}: {}\".format(mnist_dataset, filepath))\n print(\"\")\n data = idxgz.load(filepath)\n print(\"data:\", data)\n print(\"type:\", type(data))\n print(\"dtype:\", data.dtype)\n print(\"shape:\", data.shape)\n\n print(\"\")", "def generate_test_settings(tmpdir, dataset):\n\n # When `tmpdir` is a path convert it to a string\n if isinstance(tmpdir, py._path.local.LocalPath):\n tmpdir = str(tmpdir)\n \n test_settings = {\n \n 'datasets': {\n 'mnist': {\n 'train': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-labels-idx1-ubyte.gz\"\n },\n 'test': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-labels-idx1-ubyte.gz\"\n },\n },\n },\n 'data-dir': tmpdir + \"/\" + dataset + \"/data\"\n }\n overwrite_settings(test_settings)", "def run(\n dataset,\n setting\n ):\n \n log_setting = setting if setting else \"default\" \n logger.debug(\"Create setting '{0}' from dataset '{1}'\".format(log_setting, dataset))\n\n if dataset in expmgmt.config.settings.get_datasets():\n expmgmt.config.settings.set_dataset(\n dataset,\n setting\n )", "def setUp(self):\n self.dataset = get_test_dataset()", "def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')", "def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n 
recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)", "def setUp(self):\n self.dataset = self.dataset_cls()", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def get_dataset(test_envs, args, hparams, algorithm_class=None):\n is_mnist = \"MNIST\" in args.dataset\n dataset = vars(datasets)[args.dataset](args.data_dir, test_envs)\n # if not isinstance(dataset, MultipleEnvironmentImageFolder):\n # raise ValueError(\"SMALL image datasets are not implemented (corrupted), for transform.\")\n\n in_splits = []\n out_splits = []\n for env_i, env in enumerate(dataset):\n # The split only depends on seed_hash (= trial_seed).\n # It means that the split is always identical only if use same trial_seed,\n # independent to run the code where, when, or how many times.\n out, in_ = split_dataset(\n env,\n int(len(env) * args.holdout_fraction),\n misc.seed_hash(args.trial_seed, env_i),\n )\n if env_i in test_envs:\n in_type = \"test\"\n out_type = \"test\"\n else:\n in_type = \"train\"\n out_type = \"valid\"\n\n if is_mnist:\n in_type = \"mnist\"\n out_type = \"mnist\"\n\n set_transfroms(in_, in_type, hparams, algorithm_class)\n set_transfroms(out, out_type, hparams, algorithm_class)\n\n if hparams[\"class_balanced\"]:\n in_weights = misc.make_weights_for_balanced_classes(in_)\n out_weights = misc.make_weights_for_balanced_classes(out)\n else:\n in_weights, out_weights = None, None\n in_splits.append((in_, in_weights))\n out_splits.append((out, out_weights))\n\n return dataset, in_splits, out_splits", "def create_sandbox_dataset(project_id, dataset_id):\n sandbox_dataset_id = get_sandbox_dataset_id(dataset_id)\n friendly_name = f'Sandbox for {dataset_id}'\n description = f'Sandbox created for storing records affected by the cleaning rules applied to {dataset_id}'\n label_or_tag = {'label': '', 'tag': 
''}\n create_dataset(project_id=project_id,\n dataset_id=sandbox_dataset_id,\n friendly_name=friendly_name,\n description=description,\n label_or_tag=label_or_tag,\n overwrite_existing=False)\n\n return sandbox_dataset_id", "def __create_test_environment(self):\n os.chdir(self.wd)\n temp_dir = tempfile.gettempdir()\n self.test_root = os.path.join(temp_dir, \"test-grpc\")\n print(\"Creating testing environment in {}\".format(self.test_root))\n if os.path.exists(self.test_root):\n # delete any previous environment\n shutil.rmtree(self.test_root)\n # create root directory\n os.makedirs(self.test_root)\n def copy_app(name):\n app_root = os.path.join(self.test_root, name)\n os.makedirs(app_root)\n filename = \"grpc-{}\".format(name)\n src = os.path.join(self.args.bin, filename)\n dst = os.path.join(app_root, filename)\n shutil.copy(src, dst)\n return dst\n # copy client and server into the new test environment\n self.server_path = copy_app(\"server\")\n self.client_path = copy_app(\"client\")", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n 
NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def setUp(self):\n\n # Create a data pipe.\n self.interpreter.pipe.create('test', 'mf')\n\n # Create a temporary file name.\n ds.tmpfile = mktemp()", "def setUp(self):\n\n # Create the data pipe.\n self.interpreter.pipe.create('dasha', 'mf')\n\n # Create a temporary directory for Dasha outputs.\n ds.tmpdir = mkdtemp()", "def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def setUpClass(cls):\n cls.checkpoint = 10\n cls.dataset = 'I-AM-A-TEST-DATASET'\n cls.experiment = 'I-AM-A-TEST-FOLDER'\n cls.tokenizer_parameters = {\n 'is_uncased': [False, True],\n 'tokenizer': [\n ('char_dict', lmp.tokenizer.CharDictTokenizer),\n ('char_list', lmp.tokenizer.CharListTokenizer),\n ('whitespace_dict', lmp.tokenizer.WhitespaceDictTokenizer),\n ('whitespace_list', lmp.tokenizer.WhitespaceListTokenizer),\n ],\n }\n cls.test_dir = os.path.join(lmp.path.DATA_PATH, cls.experiment)\n os.makedirs(cls.test_dir)", "def setUp(self):\n\n self.test_data_path = 'testing/test_data/'", "def main(unused_argv):\n del unused_argv\n if not os.path.exists(FLAGS.data_dir):\n os.makedirs(FLAGS.data_dir)\n\n tfds_cached_dict = {}\n data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None\n name = FLAGS.dataset_name\n tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)\n dataset_dict = tfds_cached_dict[name]\n dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(\n dataset_dict[tfds.Split.TRAIN])\n dataset_dict[tfds.Split.TEST] = tfds.as_numpy(\n dataset_dict[tfds.Split.TEST])\n # To mock the API of tfds.load to cache the downloaded datasets.\n # Used as an argument to `get_dataset`.\n def load_fn(name, data_dir=None, batch_size=-1):\n # This function will always return the whole dataset.\n assert batch_size == -1\n del data_dir\n del batch_size\n return tfds_cached_dict[name]\n class_ids = sorted([int(x) for x in FLAGS.class_ids])\n num_classes = len(class_ids)\n for i in range(num_classes):\n for j in range(i+1, num_classes):\n print('Generating pos {} neg {}'.format(i, j))\n positive_class = class_ids[i]\n negative_class = class_ids[j]\n random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)\n for seed in random_seeds:\n dataset = create_projected_binary_dataset(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.num_train_examples, FLAGS.num_valid_examples,\n FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)\n filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.projected_dim, seed)\n serialized_dataset = dataset.SerializeToString()\n\n with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:\n f.write(serialized_dataset)", "def test_dataset_autogen_with_test(autogen_dataset_with_test):\n train_dummy = \"Etiam ligula tortor, dictum eu, placerat eget, venenatis a, magna.\"\n val_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n test_dummy = \"a sollicitudin orci sem eget massa. Suspendisse eleifend. 
Cras sed\"\n\n assert autogen_dataset_with_test.train[0][0] == train_dummy\n assert autogen_dataset_with_test.train[0][1] == '6'\n assert len(autogen_dataset_with_test.train) == 80\n\n assert autogen_dataset_with_test.val[0][0] == val_dummy\n assert autogen_dataset_with_test.val[0][1] == '6'\n assert len(autogen_dataset_with_test.val) == 20\n\n assert autogen_dataset_with_test.test[0][0] == test_dummy\n assert autogen_dataset_with_test.test[0][1] == '3'\n assert len(autogen_dataset_with_test.test) == 50", "def make_testsuite(testsuite: Dict) -> NoReturn:\n # validate testsuite format\n load_testsuite(testsuite)\n\n testsuite_config = testsuite[\"config\"]\n testsuite_path = testsuite_config[\"path\"]\n testsuite_variables = convert_variables(\n testsuite_config.get(\"variables\", {}), testsuite_path\n )\n\n logger.info(f\"start to make testsuite: {testsuite_path}\")\n\n # create directory with testsuite file name, put its testcases under this directory\n testsuite_path = ensure_file_abs_path_valid(testsuite_path)\n testsuite_dir, file_suffix = os.path.splitext(testsuite_path)\n # demo_testsuite.yml => demo_testsuite_yml\n testsuite_dir = f\"{testsuite_dir}_{file_suffix.lstrip('.')}\"\n\n for testcase in testsuite[\"testcases\"]:\n # get referenced testcase content\n testcase_file = testcase[\"testcase\"]\n testcase_path = __ensure_absolute(testcase_file)\n testcase_dict = load_test_file(testcase_path)\n testcase_dict.setdefault(\"config\", {})\n testcase_dict[\"config\"][\"path\"] = testcase_path\n\n # override testcase name\n testcase_dict[\"config\"][\"name\"] = testcase[\"name\"]\n # override base_url\n base_url = testsuite_config.get(\"base_url\") or testcase.get(\"base_url\")\n if base_url:\n testcase_dict[\"config\"][\"base_url\"] = base_url\n # override verify\n if \"verify\" in testsuite_config:\n testcase_dict[\"config\"][\"verify\"] = testsuite_config[\"verify\"]\n # override variables\n # testsuite testcase variables > testsuite config variables\n testcase_variables = convert_variables(\n testcase.get(\"variables\", {}), testcase_path\n )\n testcase_variables = merge_variables(testcase_variables, testsuite_variables)\n # testsuite testcase variables > testcase config variables\n testcase_dict[\"config\"][\"variables\"] = convert_variables(\n testcase_dict[\"config\"].get(\"variables\", {}), testcase_path\n )\n testcase_dict[\"config\"][\"variables\"].update(testcase_variables)\n\n # override weight\n if \"weight\" in testcase:\n testcase_dict[\"config\"][\"weight\"] = testcase[\"weight\"]\n\n # make testcase\n testcase_pytest_path = make_testcase(testcase_dict, testsuite_dir)\n pytest_files_run_set.add(testcase_pytest_path)", "def test_dataset_autogen_dir_with_test(autogen_dataset_dir_with_test):\n train_dummy = \"Donec non justo. Proin non massa non ante bibendum ullamcorper.\"\n val_dummy = \"nibh. Aliquam ornare, libero at auctor ullamcorper, nisl arcu iaculis\"\n test_dummy = \"a sollicitudin orci sem eget massa. Suspendisse eleifend. 
Cras sed\"\n\n assert autogen_dataset_dir_with_test.train[0][0] == train_dummy\n assert autogen_dataset_dir_with_test.train[0][1] == '4'\n assert len(autogen_dataset_dir_with_test.train) == 201\n\n assert autogen_dataset_dir_with_test.val[0][0] == val_dummy\n assert autogen_dataset_dir_with_test.val[0][1] == '10'\n assert len(autogen_dataset_dir_with_test.val) == 51\n\n assert autogen_dataset_dir_with_test.test[0][0] == test_dummy\n assert autogen_dataset_dir_with_test.test[0][1] == '3'\n assert len(autogen_dataset_dir_with_test.test) == 50", "def default_builder(self, dataset_name, eval_dataset_name):\n builder = tfds.builder(dataset_name, data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n logging.info('Training on TFDS dataset %s with split %s',\n dataset_name, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n\n if eval_dataset_name is None:\n logging.info('Evaluating on TFDS dataset %s with split %s',\n dataset_name, 'validation' + shard_spec)\n eval_data = self.default_eval_builder(builder, shard_spec)\n else:\n eval_dataset, *eval_split = eval_dataset_name.split(':')\n if not eval_split:\n eval_split = 'validation'\n else:\n eval_split = eval_split[0]\n logging.info('Evaluating on TFDS dataset %s with split %s',\n eval_dataset, eval_split + shard_spec)\n eval_builder = tfds.builder(eval_dataset, data_dir=self.data_dir)\n eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,\n shuffle_files=False)\n return train_data, eval_data", "def setUp(self):\n program = program_utils.seedProgram()\n self.profile = profile_utils.seedSOCStudent(program)", "def autogen_dataset_dir():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n seed=42,\n sep=',')", "def refresh_test_dataset(self):\n inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label = self.build_data(self.reader, self.test_items, self.option.max_path_length)\n self.test_dataset = CodeDataset(inputs_id, inputs_starts, inputs_paths, inputs_ends, inputs_label)", "def _get_setup(self, dataset_name):\n for potential_setup in self.setup:\n for dataset in potential_setup[\"datasets\"]:\n if dataset_name in dataset:\n test_setup = potential_setup\n self.io_args.color = os.path.join(self.io_args.input_root, dataset)\n return test_setup", "def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from 
all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def get_settings(dataset: DS):\n if dataset == DS.ARTIFICIAL_BBOX:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n _, annotations = create_color_classification(path=project_path, n_samples=50,\n size=(500, 500))\n\n anno = {str(project_path / image_dir / k): [f'{v}.jpg'] for k, v in annotations.items()}\n\n with open(project_file, 'w') as f:\n json.dump(anno, f)\n\n return Settings(project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n label_dir='class_images',\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=30, label_height=30,\n n_cols=3)\n elif dataset == DS.ARTIFICIAL_VIDEO:\n project_path = Path('data/artificial/')\n project_file = project_path / 'annotations.json'\n image_dir = 'images'\n create_mot_ds(project_path, image_dir, 20, True)\n return Settings(\n project_path=project_path,\n project_file=project_file,\n image_dir=image_dir,\n im_width=200,\n im_height=200,\n result_dir='create_results',\n )\n elif dataset == DS.CIFAR10:\n cifar_train_p, cifar_test_p = get_cifar10(Path('data'))\n\n return Settings(project_path=Path('data/cifar10/'),\n project_file=cifar_test_p,\n image_dir='test',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=140, label_height=30,\n n_cols=2)\n\n elif dataset == DS.OXFORD102:\n flowers102_train_p, flowers102_test_p = get_oxford_102_flowers(Path('data'))\n\n return Settings(project_path=Path('data/oxford-102-flowers'),\n project_file=flowers102_test_p,\n image_dir='jpg',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=40, label_height=30,\n n_cols=7)\n\n elif dataset == DS.CUB200:\n cub200_train_p, cub200_test_p = get_cub_200_2011(Path('data'))\n\n return Settings(project_path=Path('data/CUB_200_2011'),\n project_file=cub200_test_p,\n image_dir='images',\n label_dir=None,\n # used on create step - should be empty!\n result_dir='create_results',\n im_width=50, im_height=50,\n label_width=50, label_height=50,\n n_cols=7)\n else:\n raise UserWarning(f\"Dataset {dataset} is not supported!\")", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = 
current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def setup(self, ds):\n pass", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def setUpClass(cls):\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n meta_file = os.path.join(cur_dir, 'testdata/meta.json')\n graph_file = os.path.join(cur_dir, 'testdata/graph.json')\n output_file = os.path.join(cur_dir, 'testdata/graph.dat')\n builder = os.path.join(cur_dir, '../../../tools/bin/json2dat.py')\n\n command = \"python {builder} -i {input} -c {meta} -o {output}\".format(\n builder=builder, input=graph_file, meta=meta_file, output=output_file)\n\n try:\n subprocess.call(command, shell=True)\n except:\n raise RuntimeError(\"Build Graph for test failed\")\n\n base.initialize_graph({\n 'mode': 'Local',\n 'directory': os.path.join(cur_dir, 'testdata'),\n 'load_type': 'compact'\n })", "def make_dataset(dataset_name):\n return {\n\n 'duc': DUCDataset(),\n\n 'icsi-asr': ICSIASRDataset(),\n 'icsi-ht': ICSIHumanTranscriptDataset(),\n\n 'inspec-train': InspectTrainingDataset(),\n 'inspec-val': InspectValidationDataset(),\n 'inspec-test': InspectTestDataset(),\n\n 'nus': NUSDataset()\n\n }[dataset_name]", "def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass", "def testset(self, batchsize=None, flatten=True):\n if batchsize is None:\n batchsize = self.batchsize\n\n return self.GENERATOR(self.x_test, self.y_test, batchsize, flatten=flatten, evaluate=True)", "def setUp(self):\n\n self.directory = tempfile.mkdtemp(dir=os.getcwd())\n spirv_args = self.test.spirv_args\n # Instantiate placeholders in spirv_args\n self.test.spirv_args = [\n arg.instantiate_for_spirv_args(self)\n if isinstance(arg, PlaceHolder) else arg for arg in self.test.spirv_args\n ]\n # Get all shader files' names\n self.inputs = [arg for arg in spirv_args if isinstance(arg, PlaceHolder)]\n self.file_shaders = [arg.filename for arg in self.inputs]\n\n if 'environment' in get_all_variables(self.test):\n self.test.environment.write(self.directory)\n\n expectations = [\n v for v in get_all_variables(self.test)\n if v.startswith(EXPECTED_BEHAVIOR_PREFIX)\n ]\n # Instantiate placeholders in expectations\n for expectation_name in expectations:\n expectation = getattr(self.test, expectation_name)\n if isinstance(expectation, list):\n expanded_expections = [\n element.instantiate_for_expectation(self)\n if isinstance(element, PlaceHolder) else element\n for element in expectation\n ]\n setattr(self.test, expectation_name, expanded_expections)\n elif isinstance(expectation, PlaceHolder):\n setattr(self.test, expectation_name,\n expectation.instantiate_for_expectation(self))", "def setUpClass(cls):\n cls.testDir = tempfile.mkdtemp()\n cls.readonlyDir = tempfile.mkdtemp()\n cls.testfile = os.path.join(cls.testDir, 'desispec_test_io.fits')\n 
cls.testyfile = os.path.join(cls.testDir, 'desispec_test_io.yaml')\n cls.testlog = os.path.join(cls.testDir, 'desispec_test_io.log')\n # cls.testbrfile appears to be unused by this class.\n cls.testbrfile = os.path.join(cls.testDir, 'desispec_test_io-br.fits')\n cls.origEnv = {'SPECPROD': None,\n \"DESI_ROOT\": None,\n \"DESI_ROOT_READONLY\": None,\n \"DESI_SPECTRO_DATA\": None,\n \"DESI_SPECTRO_REDUX\": None,\n \"DESI_SPECTRO_CALIB\": None,\n }\n cls.testEnv = {'SPECPROD':'dailytest',\n \"DESI_ROOT\": cls.testDir,\n \"DESI_ROOT_READONLY\": cls.readonlyDir,\n \"DESI_SPECTRO_DATA\": os.path.join(cls.testDir, 'spectro', 'data'),\n \"DESI_SPECTRO_REDUX\": os.path.join(cls.testDir, 'spectro', 'redux'),\n \"DESI_SPECTRO_CALIB\": os.path.join(cls.testDir, 'spectro', 'calib'),\n }\n cls.datadir = cls.testEnv['DESI_SPECTRO_DATA']\n cls.reduxdir = os.path.join(cls.testEnv['DESI_SPECTRO_REDUX'],\n cls.testEnv['SPECPROD'])\n for e in cls.origEnv:\n if e in os.environ:\n cls.origEnv[e] = os.environ[e]\n os.environ[e] = cls.testEnv[e]", "def make_test_dataset(outdir, overwrite=False,\n observatory_name='HESS', n_obs=10,\n az_range=Angle([0, 360], 'deg'),\n alt_range=Angle([45, 90], 'deg'),\n date_range=(Time('2010-01-01'),\n Time('2015-01-01')),\n n_tels_range=(3, 4),\n sigma=Angle(5., 'deg'),\n spectral_index=2.7,\n random_state='random-seed'):\n from ..data import DataStore\n random_state = get_random_state(random_state)\n\n # create output folder\n Path(outdir).mkdir(exist_ok=overwrite)\n\n # generate observation table\n observation_table = make_test_observation_table(observatory_name=observatory_name,\n n_obs=n_obs,\n az_range=az_range,\n alt_range=alt_range,\n date_range=date_range,\n use_abs_time=False,\n n_tels_range=n_tels_range,\n random_state=random_state)\n\n # save observation list to disk\n outfile = Path(outdir) / 'runinfo.fits'\n observation_table.write(str(outfile))\n\n # create data store for the organization of the files\n # using H.E.S.S.-like dir/file naming scheme\n if observatory_name == 'HESS':\n scheme = 'HESS'\n else:\n s_error = \"Warning! Storage scheme for {}\".format(observatory_name)\n s_error += \"not implemented. Only H.E.S.S. 
scheme is available.\"\n raise ValueError(s_error)\n\n data_store = DataStore(dir=outdir, scheme=scheme)\n\n # loop over observations\n for obs_id in observation_table['OBS_ID']:\n event_list, aeff_hdu = make_test_eventlist(observation_table=observation_table,\n obs_id=obs_id,\n sigma=sigma,\n spectral_index=spectral_index,\n random_state=random_state)\n\n # save event list and effective area table to disk\n outfile = data_store.filename(obs_id, filetype='events')\n outfile_split = outfile.rsplit(\"/\", 1)\n os.makedirs(outfile_split[0]) # recursively\n event_list.write(outfile)\n outfile = data_store.filename(obs_id, filetype='effective area')\n aeff_hdu.writeto(outfile)", "def setUpClass(cls):\n super(Module05Tests, cls).setUpClass()\n cls.datasets = {\n 0: DATASETS_ROOT + 'diffusion_synthetic_normal_L8_r2_slices_41_50_gr15_b1200',\n 1: DATASETS_ROOT + 'filtered',\n 2: DATASETS_ROOT + 'noise'\n }\n cls.data = smns.load_object(file_path=cls.datasets[2])", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def setUp(self) -> None:\n\n self.helper = EnvironmentVariableHelper()\n\n self.test_name = \"PYFUNCEBLE_TESTING\"\n self.temp_env_file = tempfile.NamedTemporaryFile(\"w\", delete=False)", "def setUp(self):\n self.cfg_path = \"acloud_unittest.config\"\n file_write = open(self.cfg_path, 'w')\n file_write.write(_CreateCfgFile().strip())\n file_write.close()\n self.gcp_env_runner = gcp_setup_runner.GcpTaskRunner(self.cfg_path)\n self.gcloud_runner = gcp_setup_runner.GoogleSDKBins(\"\")", "def generate(ctx, include, host_data_type, encryption_type, match_rate, sparsity, guest_data_size,\n host_data_size, guest_feature_num, host_feature_num, output_path, force, split_host, upload_data,\n remove_data, use_local_data, parallelize, **kwargs):\n ctx.obj.update(**kwargs)\n ctx.obj.post_process()\n namespace = ctx.obj[\"namespace\"]\n config_inst = ctx.obj[\"config\"]\n if ctx.obj[\"extend_sid\"] is not None:\n config_inst.extend_sid = ctx.obj[\"extend_sid\"]\n if ctx.obj[\"auto_increasing_sid\"] is not None:\n config_inst.auto_increasing_sid = ctx.obj[\"auto_increasing_sid\"]\n if parallelize and upload_data:\n upload_data = False\n yes = ctx.obj[\"yes\"]\n echo.welcome()\n echo.echo(f\"testsuite namespace: {namespace}\", fg='red')\n echo.echo(\"loading testsuites:\")\n if host_data_size is None:\n host_data_size = guest_data_size\n suites = _load_testsuites(includes=include, excludes=tuple(), glob=None)\n suites += _load_testsuites(includes=include, excludes=tuple(), glob=None,\n suffix=\"benchmark.json\", suite_type=\"benchmark\")\n for suite in suites:\n if upload_data:\n echo.echo(f\"\\tdataget({len(suite.dataset)}) dataset({len(suite.dataset)}) {suite.path}\")\n else:\n 
echo.echo(f\"\\tdataget({len(suite.dataset)}) {suite.path}\")\n if not yes and not click.confirm(\"running?\"):\n return\n\n _big_data_task(include, guest_data_size, host_data_size, guest_feature_num, host_feature_num, host_data_type,\n config_inst, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize)\n if upload_data:\n if use_local_data:\n _config.use_local_data = 0\n _config.data_switch = remove_data\n client_upload(suites=suites, config_inst=config_inst, namespace=namespace, output_path=output_path)", "def switch_to_test_data(self) -> None:\n if self._test_name not in self._datasets:\n raise ValueError(\"Test data not provided.\")\n self.switch_to_dataset(self._test_name)", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def prepare_testenv(config=None, template=None, args=None):\n\n class MockArgs(object):\n def __getattr__(self, name):\n return self.name if name in self.__dict__ else None\n\n config = {} if config is None else dict(config)\n\n with generate_temp_dir() as work_dir, interim_working_dir(work_dir):\n # force root directory to temporary directory; or configure all working\n # content based off the generated temporary directory\n if 'root_dir' not in config:\n config['root_dir'] = work_dir\n else:\n if 'cache_dir' not in config:\n config['cache_dir'] = os.path.join(work_dir, 'cache')\n if 'dl_dir' not in config:\n config['dl_dir'] = os.path.join(work_dir, 'dl')\n if 'out_dir' not in config:\n config['out_dir'] = os.path.join(work_dir, 'out')\n\n if template:\n copy_template(template, work_dir)\n\n # build arguments instance\n test_args = MockArgs()\n for k, v in config.items():\n setattr(test_args, k, v)\n\n # prepare engine options and build an engine instance\n opts = RelengEngineOptions(args=test_args, forward_args=args)\n engine = RelengEngine(opts)\n\n yield engine", "def setUp(self):\n\n # setup init variables\n self.init_vars = {\n 'suppress_logfile': True,\n 'verbosity': 0,\n 'mothur_seed': 54321,\n }\n\n # setup directories for testing\n test_dir = os.path.join(os.getcwd(), 'tests')\n self.test_output_dir = os.path.join(test_dir, 'test_output')\n if not os.path.isdir(self.test_output_dir):\n os.makedirs(self.test_output_dir)\n self.test_input_dir = os.path.join(test_dir, 'test_data')\n\n return", "def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def setUp(self):\n\n 
self.client = app.test_client()\n app.config['Testing'] = True\n app.config['SECRET_KEY'] = 'test'\n connect_to_db(app, db_uri='postgresql:///testdb', echo=False)\n db.create_all()\n\n example_data() # Need to expand!", "def make_environment(seed, task_horizon):\n # Load the gym environment.\n environment = CartPoleEnv()\n environment = gym_wrappers.TimeLimit(environment, task_horizon)\n environment.seed(seed)\n environment = wrappers.GymWrapper(environment)\n environment = wrappers.SinglePrecisionWrapper(environment)\n return environment", "def build():\n\tconsole = Console()\n\tconsole.clear()\n\tconsole.print(BANNER)\n\tif not os.path.exists(\"dataset.yaml\"):\n\t\tclick.clear()\n\t\tconsole.print(\"Dataset config file not found\\nRun - idt init\\n\")\n\t\texit(0)\n\n\twith open('dataset.yaml') as f:\n\t\tdata = yaml.load(f, Loader=yaml.FullLoader)\n\t\n\tclick.clear()\n\tconsole.print(\"Building [bold blue]{dataset_name}[/bold blue] dataset...\\n\".format(dataset_name=data['DATASET_NAME']))\n\tfor classes in data['CLASSES']:\n\t\tclick.clear()\n\t\tconsole.print('Creating [bold blue]{name} class[/bold blue] \\n'.format(name=classes['CLASS_NAME']))\n\t\tsearch_list = classes['SEARCH_KEYWORDS'].split(\",\")\n\t\tfor keywords in search_list:\n\t\t\tfactory = SearchEngineFactory(keywords,data['SAMPLES_PER_SEARCH'],classes['CLASS_NAME'],data['RESIZE_METHOD'], data['DATASET_NAME'],data['IMAGE_SIZE'], data['ENGINE'],data['API_KEY'])\n\t# Remove corrupt files\n\tremove_corrupt(data['DATASET_NAME'])\n\n\t# Create a CSV with dataset info\n\tcreate_dataset_csv(data['DATASET_NAME'])\n\tclick.clear()\n\tconsole.print(\"Dataset READY!\")", "def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }", "def test_save_load(dataset_type, repeat):\n set_random_seed_from_args(\"test_save_load\", dataset_type, repeat)\n dataset = get_dataset_and_name_from_type(dataset_type)\n dataset_name = repr(dataset)\n dataset.save(dataset_name, output_dir)\n dataset_loaded = data.DataSet().load(dataset_name, output_dir)\n assert np.all(dataset.train.x == dataset_loaded.train.x )\n assert np.all(dataset.train.y == dataset_loaded.train.y )\n assert np.all(dataset.train.n == dataset_loaded.train.n )\n assert np.all(dataset.test.x == dataset_loaded.test.x )\n assert np.all(dataset.test.y == dataset_loaded.test.y )\n assert np.all(dataset.test.n == dataset_loaded.test.n )", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def generate_dataset(self):\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, 
dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()", "def create_datasets(config, data_rng):\n # Compute batch size per device from global batch size.\n if config.batch_size % jax.device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.device_count()}).')\n per_device_batch_size = config.batch_size // jax.device_count()\n\n dataset_builder = tfds.builder(config.dataset)\n\n def cast_int32(batch):\n img = tf.cast(batch['image'], tf.int32)\n out = batch.copy()\n out['image'] = img\n return out\n\n def drop_info(batch):\n \"\"\"Removes unwanted keys from batch.\"\"\"\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch\n\n if config.data_augmentation:\n should_augment = True\n should_randflip = True\n should_rotate = True\n else:\n should_augment = False\n should_randflip = False\n should_rotate = False\n\n def augment(batch):\n img = tf.cast(batch['image'], tf.float32)\n aug = None\n if should_augment:\n if should_randflip:\n img_flipped = tf.image.flip_left_right(img)\n aug = tf.random.uniform(shape=[]) > 0.5\n img = tf.where(aug, img_flipped, img)\n if should_rotate:\n u = tf.random.uniform(shape=[])\n k = tf.cast(tf.floor(4. * u), tf.int32)\n img = tf.image.rot90(img, k=k)\n aug = aug | (k > 0)\n if aug is None:\n aug = tf.convert_to_tensor(False, dtype=tf.bool)\n\n out = batch.copy()\n out['image'] = img\n return out\n\n def preprocess_train(batch):\n return cast_int32(augment(drop_info(batch)))\n\n def preprocess_eval(batch):\n return cast_int32(drop_info(batch))\n\n # Read instructions to shard the dataset!\n print('train', dataset_builder.info.splits['train'].num_examples)\n # TODO(emielh) use dataset_info instead of num_examples.\n train_split = deterministic_data.get_read_instruction_for_host(\n 'train', num_examples=dataset_builder.info.splits['train'].num_examples)\n train_ds = deterministic_data.create_dataset(\n dataset_builder,\n split=train_split,\n num_epochs=1,\n shuffle=True,\n batch_dims=[jax.local_device_count(), per_device_batch_size],\n preprocess_fn=preprocess_train,\n rng=data_rng,\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=True\n )\n\n # TODO(emielh) check if this is necessary?\n\n # Test batches are _not_ sharded. In the worst case, this simply leads to some\n # duplicated information. 
In our case, since the elbo is stochastic we get\n # multiple passes over the test data.\n if config.test_batch_size % jax.local_device_count() != 0:\n raise ValueError(f'Batch size ({config.batch_size}) must be divisible by '\n f'the number of devices ({jax.local_device_count()}).')\n test_device_batch_size = config.test_batch_size // jax.local_device_count()\n\n eval_ds = deterministic_data.create_dataset(\n dataset_builder,\n split='test',\n # Repeated epochs for lower variance ELBO estimate.\n num_epochs=config.num_eval_passes,\n shuffle=False,\n batch_dims=[jax.local_device_count(), test_device_batch_size],\n preprocess_fn=preprocess_eval,\n # TODO(emielh) Fix this with batch padding instead of dropping.\n prefetch_size=tf.data.AUTOTUNE,\n drop_remainder=False)\n\n return dataset_builder.info, train_ds, eval_ds", "def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'afl'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'corpus_mutations_radamsa,corpus_subset,'\n strategy2.probability = 0.34\n strategy2.engine = 'afl'\n data.append(strategy2)\n\n strategy3 = data_types.FuzzStrategyProbability()\n strategy3.strategy_name = 'corpus_subset,'\n strategy3.probability = 0.33\n strategy3.engine = 'afl'\n data.append(strategy3)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)", "def setUp(self):\n with open(SRC_PATH + \"configs/etl_config.json\", \"r\") as f:\n self.config = json.loads(f.read())\n self.spark = SparkBuilder(\"test\").build_sc()\n self.test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../tests/test_data/')", "def set_random_envs(self):\n self.randomizer.randomize(num_samples=self.num_teachers)\n params = self.randomizer.get_params(fmt=\"dict\", dtype=\"numpy\")\n\n for e in range(self.num_teachers):\n self.teacher_envs.append(deepcopy(self.env_real))\n print({key: value[e] for key, value in params.items()})\n self.teacher_envs[e].domain_param = {key: value[e] for key, value in params.items()}", "def __init__(\n self,\n train_dataset, # TODO: Allow providing separate (train_input, train_target) dfs\n environment_params_path=None,\n *,\n results_path=None,\n metrics=None,\n holdout_dataset=None, # TODO: Allow providing separate (holdout_input, holdout_target) dfs\n test_dataset=None, # TODO: Allow providing separate (test_input, test_target) dfs\n target_column=None,\n id_column=None,\n do_predict_proba=None,\n prediction_formatter=None,\n metrics_params=None,\n cv_type=None,\n runs=None,\n global_random_seed=None,\n random_seeds=None,\n random_seed_bounds=None,\n cv_params=None,\n verbose=None,\n file_blacklist=None,\n reporting_params=None,\n to_csv_params=None,\n do_full_save=None,\n experiment_callbacks=None,\n experiment_recorders=None,\n ):\n G.Env = self\n self.environment_params_path = environment_params_path\n self.results_path = results_path\n\n #################### Attributes Used by Experiments ####################\n self.target_column = target_column\n self.id_column = id_column\n\n self.train_dataset = train_dataset\n self.holdout_dataset = holdout_dataset\n self.test_dataset = test_dataset\n\n self.do_predict_proba = do_predict_proba\n 
self.prediction_formatter = prediction_formatter\n self.metrics = metrics\n self.metrics_params = metrics_params\n\n self.cv_type = cv_type\n self.runs = runs\n self.global_random_seed = global_random_seed\n self.random_seeds = random_seeds\n self.random_seed_bounds = random_seed_bounds\n self.cv_params = cv_params\n\n #################### Ancillary Environment Settings ####################\n self.verbose = verbose\n self.file_blacklist = file_blacklist\n self.reporting_params = reporting_params or {}\n self.to_csv_params = to_csv_params or {}\n self.do_full_save = do_full_save\n self.experiment_callbacks = experiment_callbacks or []\n self.experiment_recorders = experiment_recorders or []\n\n self.result_paths = {\n \"root\": self.results_path,\n \"checkpoint\": None,\n \"description\": None,\n \"heartbeat\": None,\n \"predictions_holdout\": None,\n \"predictions_in_fold\": None,\n \"predictions_oof\": None,\n \"predictions_test\": None,\n \"script_backup\": None,\n \"tested_keys\": None,\n \"key_attribute_lookup\": None,\n \"leaderboards\": None,\n \"global_leaderboard\": None,\n \"current_heartbeat\": None,\n }\n self.current_task = None\n self.cross_experiment_key = None\n\n self.environment_workflow()", "def test_create_extended(setup_teardown_file):\n f = setup_teardown_file[3]\n grp = f.create_group(\"test\")\n\n dset = grp.create_dataset('foo', (63,))\n assert dset.shape == (63,)\n assert dset.size == 63\n\n dset = f.create_dataset('bar', (6, 10))\n assert dset.shape == (6, 10)\n assert dset.size == (60)", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUp(self):\n self.workspace_dir = tempfile.mkdtemp()", "def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n pass\n\n atexit.register(remove_output_dir)\n _output_dir = tempfile.mkdtemp(dir=TESTS_DIR)\n\n os.environ['VOC_BUILD_DIR'] = os.path.join(_output_dir, 'build')\n os.environ['VOC_DIST_DIR'] = os.path.join(_output_dir, 'dist')\n\n # If the code has been precompiled, we don't have to\n # compile it as part of the test suite setup.\n precompile = os.environ.get('PRECOMPILE', 'true').lower() == 'true'\n if not precompile:\n _suite_configured = True\n return\n\n proc = subprocess.Popen(\n \"ant java\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n try:\n out, err = proc.communicate(timeout=30)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n raise\n\n if proc.returncode != 0:\n raise Exception(\"Error compiling java sources: \" + out.decode('ascii'))\n\n _suite_configured = True", "def setup(self, stage: Union[str, None] = None) -> None:\n self.data_splits = {}\n # set up each of the dataset splits\n for key, path in self.paths.items():\n self.data_splits[key] = self.dataset_class(path)", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for 
validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def setUp(self) -> None:\n self.sqlite_db = setup_sqlite_in_memory_db()\n create_tables(self.sqlite_db)\n seed_all_distributions()\n container_flow_generation_manager = ContainerFlowGenerationManager()\n container_flow_generation_manager.set_properties(\n name=\"Test previews\",\n start_date=datetime.datetime.now().date(),\n end_date=datetime.datetime.now().date() + datetime.timedelta(days=21)\n )", "def setUp(self):\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"something\")\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()", "def setUpClass(cls):\n cls.config.setup_toolbox('ENVI', 'qa_envitaskengine_datatype_sarscapedataarray',\n 'test_datatype_sarscapedataarray')", "def init(default):\n\tconsole = Console()\n\tconsole.clear()\n\n\tif default:\n\t\tdocument_dict = {\n\t\t\t\"DATASET_NAME\": \"dataset\",\n\t\t\t\"API_KEY\": \"\",\n\t\t\t\"SAMPLES_PER_SEARCH\": 50,\n\t\t\t\"IMAGE_SIZE\": 512,\n\t\t\t\"ENGINE\": \"duckgo\",\n\t\t\t\"RESIZE_METHOD\": \"longer_side\",\n\t\t\t\"CLASSES\": [{\"CLASS_NAME\": \"Test\", \"SEARCH_KEYWORDS\": \"images of cats\"}]}\n\n\t\tif not os.path.exists(\"dataset.yaml\"):\n\t\t\tconsole.print(\"[bold]Creating a dataset configuration file...[/bold]\")\n\t\t\t\n\t\t\tf = open(\"dataset.yaml\", \"w\")\n\t\t\tf.write(yaml.dump(document_dict))\n\t\t\tif f:\n\t\t\t\tconsole.clear()\n\t\t\t\tconsole.print(\"Dataset YAML file has been created sucessfully. Now run [bold blue]idt build[/bold blue] to mount your dataset!\")\n\t\t\t\texit(0)\n\t\t\t\n\t\t\n\t\telse:\n\t\t\tconsole.print(\"[red]A dataset.yaml is already created. To use another one, delete the current dataset.yaml file[/red]\")\n\t\t\texit(0)\n\n\tconsole.print(BANNER)\n\tdataset_name = click.prompt(\"Insert a name to your dataset: \")\n\n\tconsole.clear()\n\tsamples = click.prompt(\"How many samples per seach will be necessary? \",type=int)\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose image resolution[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] 512 pixels / 512 pixels [bold blue](recommended)[/bold blue]\n[2] 1024 pixels / 1024 pixels\n[3] 256 pixels / 256 pixels\n[4] 128 pixels / 128 pixels\n[5] Keep original image size\n\n[italic]ps: note that the aspect ratio of the image will [bold]not[/bold] be changed, so possibly the images received will have slightly different size[/italic]\n\t\t\n\t\t\"\"\")\n\n\n\timage_size_ratio = click.prompt(\"What is the desired image size ratio\", type=int)\n\twhile image_size_ratio < 1 or image_size_ratio > 5:\n\t\tconsole.print(\"[italic red]Invalid option, please choose between 1 and 5. 
[/italic red]\")\n\t\timage_size_ratio= click.prompt(\"\\nOption: \",type=int)\n\n\tif image_size_ratio == 1:\n\t\timage_size_ratio= 512\n\telif image_size_ratio == 2:\n\t\timage_size_ratio = 1024\n\telif image_size_ratio == 3:\n\t\timage_size_ratio = 256\n\telif image_size_ratio == 4:\n\t\timage_size_ratio= 128\n\telif image_size_ratio == 5:\n\t\timage_size_ratio = 0\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose a resize method[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] Resize image based on longer side\n[2] Resize image based on shorter side\n[3] Smartcrop\n\n[italic]ps: note that the aspect ratio of the image will [bold]not[/bold] be changed, so possibly the images received will have slightly different size[/italic]\n\t\t\n\t\t\"\"\")\n\tresize_method = click.prompt(\"Desired Image resize method: \", type=int)\n\twhile resize_method < 1 or resize_method > 3:\n\t\tconsole.print(\"[red]Invalid option[/red]\")\n\t\tresize_method = click.prompt(\"Choose method [1-3]: \")\n\n\tresize_method_options = ['','longer_side','shorter_side','smartcrop']\n\n\n\tconsole.clear()\n\tnumber_of_classes = click.prompt(\"How many image classes are required? \",type=int)\n\n\tdocument_dict = {\n \n \"DATASET_NAME\": dataset_name,\n \n \"SAMPLES_PER_SEARCH\": samples,\n \n \"IMAGE_SIZE\": image_size_ratio,\n \n \"RESIZE_METHOD\": resize_method_options[resize_method],\n \n \"CLASSES\": []\n \n}\n\n\tconsole.clear()\n\tconsole.print(\"[bold]Choose a search engine[/bold]\", justify=\"center\")\n\tconsole.print(\"\"\"\n\n[1] Duck GO [bold blue](recommended)[/bold blue]\n[2] Bing\n[3] Bing API [italic yellow](Requires API key)[/italic yellow]\n[4] Flickr API [italic yellow](Requires API key)[/italic yellow]\n\n\t\t\"\"\")\n\tsearch_engine= click.prompt(\"Select option:\", type=int)\n\twhile search_engine < 0 or search_engine > 4:\n\t\tconsole.print(\"[italic red]Invalid option, please choose between 1 and 4.[/italic red]\")\n\t\tsearch_engine = click.prompt(\"\\nOption: \", type=int)\n\n\tsearch_options = ['none', 'duckgo', 'bing', 'bing_api', 'flickr_api']\n\tdocument_dict['ENGINE'] = search_options[search_engine]\n\n\tif search_engine > 2:\n\t\tconsole.clear()\n\t\tconsole.print(f'Insert your [bold blue]{search_options[search_engine]}[/bold blue] API key')\n\t\tengine_api_key = click.prompt(\"API key: \", type=str)\n\t\tdocument_dict['API_KEY'] = engine_api_key\n\telse:\n\t\tdocument_dict['API_KEY'] = \"NONE\"\n\n\tsearch_engine = search_options[search_engine]\n\n\tfor x in range(number_of_classes):\n\t\tconsole.clear()\n\t\tclass_name = click.prompt(\"Class {x} name: \".format(x=x+1))\n\t\tconsole.clear()\n\n\t\tconsole.print(\"\"\"In order to achieve better results, choose several keywords that will be provided to the search engine to find your class in different settings.\n\t\n[bold blue]Example: [/bold blue]\n\nClass Name: [bold yellow]Pineapple[/bold yellow]\n[italic]keywords[/italic]: [underline]pineapple, pineapple fruit, ananas, abacaxi, pineapple drawing[/underline]\n\n\t\t\t\"\"\")\n\t\tkeywords = click.prompt(\"Type in all keywords used to find your desired class, separated by commas: \")\n\t\tdocument_dict['CLASSES'].append({'CLASS_NAME': class_name, 'SEARCH_KEYWORDS': keywords})\n \n\tif not os.path.exists(\"dataset.yaml\"):\n\t\tconsole.print(\"[bold]Creating a dataset configuration file...[/bold]\")\n\t\ttry:\n\t\t\tf = open(\"dataset.yaml\", \"w\")\n\t\t\tf.write(yaml.dump(document_dict))\n\t\t\tif f:\n\t\t\t\tconsole.clear()\n\t\t\t\tconsole.print(\"Dataset YAML file has 
been created sucessfully. Now run [bold blue]idt build[/bold blue] to mount your dataset!\")\n\t\texcept:\n\t\t\tconsole.print(\"[red]Unable to create file. Please check permission[/red]\")\n\t\t\n\telse:\n\t\tconsole.print(\"[red]A dataset.yaml is already created. To use another one, delete the current dataset.yaml file[/red]\")", "def generate_VS_data(self, testmode=True):\n if \"vcnmodel.VS_datasets\" not in list(dir()):\n from vcnmodel import VS_datasets as VS_datasets\n print(dir())\n importlib.reload(VS_datasets)\n config = toml.load(open(\"wheres_my_data.toml\", \"r\"))\n\n \"\"\"\n Generate the table in VS_data.py by analyzing the data from \n VS_datasets.py\n \"\"\"\n cprint(\"r\", \"Generate VS Data\")\n\n fout = \"VS_data.py\" # we will generate this automatically\n with open(fout, \"w\") as fh:\n fh.write(f'\"\"\"\\n')\n fh.write(\n \" Vector strength for models with SAM tones, different input configurations.\\n\"\n )\n fh.write(\" 17 Aug 2021 version.\\n\")\n fh.write(\n \" Results are printout from DataTablesVCN after selecting the data runs.\\n\"\n )\n fh.write(\n \"NOTE: This table is automatically written by figures.py and should not be\\n\"\n )\n fh.write(\" directly edited.\")\n fh.write(f' pbm\\n\"\"\"\\n')\n fh.write('\\ndata = \"\"\"')\n\n fl = True\n for i, celln in enumerate(grAList()):\n if testmode and i != 1:\n continue\n self.analyze_VS_data(VS_datasets, celln, fout, firstline=fl)\n fl = False\n with open(fout, \"a\") as fh:\n fh.write(f'\"\"\"\\n')\n print(\"VS_Finis\")", "def set_test_environment():\n import flask_monitoringdashboard\n\n flask_monitoringdashboard.config.database_name = 'sqlite:///test-database.db'", "def _prepare_test_cases(ptfhost, request):\n logger.info(\"Preparing SAI test environment.\")\n _create_sai_test_folders(ptfhost)\n _copy_sai_test_cases(ptfhost, request)", "def evaluate(\n self,\n test_dataset: Union[Dataset, InstanceDataset],\n batch_size: int = 16,\n lazy: bool = False,\n output_dir: Optional[Union[str, Path]] = None,\n verbose: bool = True,\n ) -> Dict[str, Any]:\n trainer = Trainer(self, lazy=lazy)\n\n return trainer.test(\n test_dataset, batch_size=batch_size, output_dir=output_dir, verbose=verbose\n )", "def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()", "def define_datasets(cfg, to_run=None):\n\n # Create the training dataset with info from the cfg file.\n train_dataset = dp.AlexNetDataset(\n file_names=cfg.train_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n num_epochs=cfg.num_epochs,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=cfg.keep_prob,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Add random crop, random flip and pixel distortions to the images.\n train_dataset.random_crop(cfg.crop_image_size)\n if not cfg.flip_constrain_fc6:\n train_dataset.random_flip()\n if cfg.rgb_distort:\n train_dataset.rgb_distort(\n rgb_eigenvectors=cfg.rgb_eigenvectors,\n rgb_eigenvalues=cfg.rgb_eigenvalues,\n stddev=cfg.rgb_stddev\n )\n\n # Create the training eval dataset with info from the cfg file.\n train_eval_dataset = dp.AlexNetDataset(\n file_names=cfg.train_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n max_iterations=cfg.train_eval_max_iterations,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n 
image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=1.0,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Center crop the images\n train_eval_dataset.center_crop(cfg.crop_image_size)\n\n # Create the training dataset with info from the cfg file.\n val_eval_dataset = dp.AlexNetDataset(\n file_names=cfg.validation_tfrecord_filepaths,\n batch_size=cfg.batch_size,\n num_epochs=1,\n shuffle_buffer_size=cfg.shuffle_buffer_size,\n image_size=cfg.image_size,\n rgb_mean=cfg.rgb_mean,\n keep_prob=1.0,\n num_threads=cfg.num_dataset_threads,\n to_run=to_run\n )\n\n # Center crop the images\n val_eval_dataset.center_crop(cfg.crop_image_size)\n\n # Create the iterator shared by all three dataset\n data_iter = train_dataset.reinitializable_iterator()\n\n # Create the initializers of the datasets\n train_initializer = train_dataset.iterator_initializer(data_iter)\n train_eval_initializer = train_eval_dataset.iterator_initializer(data_iter)\n val_eval_initializer = val_eval_dataset.iterator_initializer(data_iter)\n\n return data_iter, train_dataset, train_initializer, train_eval_initializer, val_eval_initializer", "def test_dataset_for_personal_accounts(self):\n pass", "def setUp(self):\n self._create_IDAA_user()\n # Create admin_user for country HQ\n self._create_admin_user()\n\n with open(self.idaa_sample_data_path) as file:\n self.idaa_json = json.load(file)\n\n with open(self.msr_sample_data_path) as file:\n self.msr_json = json.load(file)\n\n target_idaa_program = self.idaa_json['value'][self.create_idaa_program_index]['fields']\n\n new_program = self._create_tola_program(target_idaa_program, fields={\n \"name\": target_idaa_program['ProgramName'],\n \"funding_status\": target_idaa_program['ProgramStatus'],\n \"start_date\": program.convert_date(target_idaa_program['ProgramStartDate']),\n \"end_date\": program.convert_date(target_idaa_program['ProgramEndDate'])\n }, create_country=False)\n\n new_country = workflow_models.CountryFactory(country='Timor-Leste', code='TL')\n\n new_program.country.add(new_country)\n\n workflow_models.CountryFactory(country='HQ', code='HQ')\n workflow_models.CountryFactory(country='Palestine (West Bank / Gaza)', code='PS')\n\n region = models.Region(name='Middle East', gait_region_id=7)\n region.save()", "def run(dataset = 1):\n train_list, test_list = load_list(dataset, False)\n train_imgs = process_list(train_list)\n test_imgs = process_list(test_list)\n with open(os.path.join(WORKING_DIR, 'data', 'train' + str(dataset) + '.txt'), 'w') as f:\n for img in train_imgs:\n f.write(img)\n f.write(' ')\n if img[-14] == 'F':\n f.write('1')\n else:\n f.write('0')\n f.write('\\n')\n with open(os.path.join(WORKING_DIR, 'data', 'test' + str(dataset) + '.txt'), 'w') as f:\n for img in test_imgs:\n f.write(img)\n f.write(' ')\n if img[-14] == 'F':\n f.write('1')\n else:\n f.write('0')\n f.write('\\n')" ]
[ "0.7599391", "0.72667193", "0.69813424", "0.66441184", "0.64025056", "0.6268748", "0.625678", "0.6244715", "0.61173195", "0.60907793", "0.60787404", "0.60093623", "0.5926749", "0.5911557", "0.5894079", "0.58914727", "0.58914727", "0.58914727", "0.58914727", "0.58877015", "0.5883739", "0.5851698", "0.58320737", "0.58311224", "0.57986146", "0.5752506", "0.57058454", "0.5705715", "0.56993276", "0.56850374", "0.56829816", "0.5680589", "0.56574434", "0.56573915", "0.56433517", "0.56431806", "0.5635483", "0.5570082", "0.5567278", "0.55415595", "0.5536617", "0.55135494", "0.5499112", "0.5495031", "0.54835975", "0.54820764", "0.5477829", "0.54746836", "0.5472656", "0.5472233", "0.5465633", "0.54568756", "0.5452611", "0.5450126", "0.543862", "0.543626", "0.54349476", "0.5431416", "0.54306334", "0.54294246", "0.5421085", "0.5418109", "0.5411218", "0.54075897", "0.53999096", "0.53944594", "0.5389611", "0.5380227", "0.53800285", "0.5378314", "0.5376521", "0.53682375", "0.5353893", "0.5352871", "0.53466105", "0.53464484", "0.53464484", "0.53410393", "0.53410393", "0.53410393", "0.53410393", "0.53410393", "0.53410393", "0.53410393", "0.5337665", "0.5337544", "0.53366673", "0.5332664", "0.53259796", "0.5324107", "0.5311345", "0.53008527", "0.52943", "0.52864426", "0.5283851", "0.5283698", "0.5281719", "0.5277647", "0.5276359", "0.52748567" ]
0.7804764
0
For debugging test environments
def test_generate_test_environment(dataset): print("## =========================================================") print("## Dataset:", dataset) print("## ---------------------------------------------------------") print("") tmpdir = "/tmp/collagen" generate_test_environment(tmpdir, dataset) # Generate the archive files for usage in ['train', 'test']: for dstype in ['images', 'labels']: dataset_type = usage + '.' + dstype mnist_dataset = 'datasets.mnist.' + dataset_type filepath = get_setting(mnist_dataset) # 'file:///some/path' to '/some/path' if filepath[:7] == 'file://': filepath = filepath[7:] # Unpack print("") print("{}: {}".format(mnist_dataset, filepath)) print("") data = idxgz.load(filepath) print("data:", data) print("type:", type(data)) print("dtype:", data.dtype) print("shape:", data.shape) print("")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():", "def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass", "def debug() -> bool:", "def test_debug(self) -> Debug:\n return self._test_debug", "def test_debug_info():\n # Just check a sample we control.\n assert version.__version__ in _env_info", "def test_func(debug: bool) -> None:\n click.echo(debug)", "def test_debug(self, test_debug: Debug):\n\n self._test_debug = test_debug", "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def debug(self):\r\n for test in self:\r\n test.debug()", "def debug(self):\n raise NotImplementedError", "def setup_debugging():\n import sys\n sys.path.append('/root/pycharm-debug-py3k.egg')\n import pydevd\n pydevd.settrace('192.168.4.47', port=5422, stdoutToServer=True, stderrToServer=True, suspend=False)", "def local_test():\n pass", "def debug(state: bool, /) -> None:", "def test(self):\n pass", "def debug_run(self):\n raise NotImplementedError", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def unitary_test():", "def runtest(self):", "def main(debug):\n click.echo('Debug mode is {{}}'.format(debug))", "def startTestHook(self):", "def debug(self):\r\n debug = _DebugResult()\r\n self._wrapped_run(debug, True)\r\n self._tearDownPreviousClass(None, debug)\r\n self._handleModuleTearDown(debug)", "def debug(self):\n try:\n super(FaucetTopoTestBase, self).debug()\n except Exception:\n pprint.pprint(self.host_information)\n raise", "def _debug():\n return _DEBUG", "def debug(self, *args, **kwargs):", "def main(config, debug):\n config.debug = debug\n if config.debug:\n click.echo('Debug info...')", "def tests():", "def test():\n pass", "def test_app_is_development(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertFalse(current_app is None)", "def runTest(self):\n return True", "def test_create_experiment_debug_mode(self, tmp_path):\n\n conf_file = str(tmp_path / \"db.pkl\")\n\n experiment = create_experiment(\n config[\"name\"],\n space={\"x\": \"uniform(0, 10)\"},\n storage={\n \"type\": \"legacy\",\n \"database\": {\"type\": \"pickleddb\", \"host\": conf_file},\n },\n )\n\n storage = experiment._experiment._storage\n assert isinstance(storage, Legacy)\n assert isinstance(storage._db, PickledDB)\n\n experiment = create_experiment(\n config[\"name\"],\n space={\"x\": \"uniform(0, 10)\"},\n storage={\"type\": \"legacy\", \"database\": {\"type\": \"pickleddb\"}},\n debug=True,\n )\n\n storage = experiment._experiment._storage\n assert isinstance(storage, Legacy)\n assert isinstance(storage._db, EphemeralDB)", "def test_variables(self):\n self._api.SetVariable(\"debug_file\", \"/dev/null\")\n self.assertEqual(self._api.GetVariableAsString(\"debug_file\"), \"/dev/null\")", "def setDebug():\n\tglobal debug\n\tdebug = True", "def testToggleDebug(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('%d')\n self.assertIs(True, pl.debug)\n repl.runCommandLine('%d')\n self.assertIs(False, pl.debug)", "def test_args_debug():\n args = cli.parse_args(['-d'])\n assert args.debug\n args = cli.parse_args(['--debug'])\n assert args.debug", "def debug(self):\n self._debug = True\n self.run()\n self._debug = False", "def test_app_is_testing(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertTrue(app.config['TESTING'])", "def test_verbose_debug():\n output = subprocess.run(['smif', 
'list', '-vv'], stderr=subprocess.PIPE)\n assert 'DEBUG' in str(output.stderr)", "def test_app_is_testing(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertFalse(app.config['TESTING'])", "def test_access_with_debug(self):\n review_request = self.create_review_request(publish=True)\n review = self.create_review(review_request, publish=True)\n\n with self.settings(DEBUG=True):\n response = self.client.get(\n local_site_reverse(\n 'preview-review-email',\n kwargs={\n 'review_request_id': review_request.pk,\n 'review_id': review.pk,\n 'message_format': 'text',\n }))\n\n self.assertEqual(response.status_code, 200)", "def test_app_is_development(self):\n\n self.assertFalse(self.app.config['SECRET_KEY'] is 'my_precious')\n self.assertTrue(self.app.config['DEBUG'] is True)\n self.assertFalse(current_app is None)", "def debug():\n return bool(_environ.get(\"ACCELPY_DEBUG\", False))", "def debug(self):\n #breakpoint() # infinite loop\n print(self.ttl)", "def debug(self):\r\n self.setUp()\r\n getattr(self, self._testMethodName)()\r\n self.tearDown()\r\n while self._cleanups:\r\n function, args, kwargs = self._cleanups.pop(-1)\r\n function(*args, **kwargs)", "def development_function(self): \n return None", "def startTestRun(self):", "def test():", "def test():", "def test_run_started(self):", "def is_debugging() -> bool:\n if os.getenv(\"DEBUGGING\") == \"1\":\n return True\n return False", "def get_debug():\n return _DEBUG", "def debug(verbose, bot, proxy, no_browsers=False, exp_config=None):\n debugger = DebugDeployment(Output(), verbose, bot, proxy, exp_config, no_browsers)\n log(header, chevrons=False)\n debugger.run()", "def toggle_remote_debug():\n import sys\n import os\n\n debug_on = len(sys.argv) >= 2 and '--remote-debug' in sys.argv[1]\n\n if debug_on:\n egg_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"pycharm-debug-py3k.egg\"))\n sys.path.append(egg_path)\n import pydevd\n pydevd.settrace('localhost', port=9090)\n\n yield\n\n if debug_on:\n import pydevd\n pydevd.stoptrace()", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def debug(self, message):\r\n pass", "def test_is_development_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"development\"})\n is_develop = is_development_env()\n self.assertTrue(is_develop)", "def tests():\n api.local('nosetests')", "def test_swift_globals(self):\n self.build()\n self.do_test()", "def in_debugger():\n return bool(sys.gettrace())", "def cmake_debug(session):\n _cmake(session, BUILD_TYPE_DEBUG)", "def debug_option(args, run):\n run.debug = True", "def debug(self):\n return Config.DEBUG", "def test_basic_execution(self):", "def testing(self):\n print('test successful')", "def run(self, test, env):\n\n raise NotImplementedError", "def test_logging(self):\n self._verify_logging()", "def setUpClass(cls):\n app.debug = False", "def test_middleware_loads(self):\n self.client.get(\"/__debug__\")", "def NeedsDebugInfo(self):\n return True", "def setup_development_environment():\n import hashlib\n\n # Enable SQL debug\n sql_debug(True)\n\n # Create test user\n test_name = \"test\"\n test_salt = \"salt\"\n test_password = hashlib.sha256(\"{name}{salt}\".format(name=test_name, salt=test_salt).encode(\"utf8\")).hexdigest()\n account = Account(name=test_name,\n password=test_password,\n salt=test_salt)\n\n # Create test universe\n universe = 
Universe.create(name=\"Test universe\", owner=account)\n planet = Planet(name=\"Test planet\")\n region = Region(name=\"Test region\", planet=planet)\n\n place = Place(\n name=\"Void\",\n description=\"You are in the void\",\n universe=universe,\n region=region\n )", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def is_debug_environment():\n return find_loader('cli') is None", "def test_generate_project_with_debug(self) -> None:\n output_path = os.path.join(os.getcwd(), 'tmp')\n input_path = os.path.abspath(\n os.path.join(os.getcwd(),\n 'examples',\n 'classification',\n 'lmnet_quantize_cifar10',\n 'minimal_graph_with_shape.pb'))\n\n try:\n gp.run(input_path=input_path,\n dest_dir_path=output_path,\n project_name='unittest6',\n activate_hard_quantization=True,\n threshold_skipping=True,\n num_pe=16,\n use_tvm=True,\n use_onnx=False,\n debug=True,\n cache_dma=False,\n )\n\n finally:\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n\n print(\"Script test with debug options passed!\")", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def output_debug_info(self):", "def test_debug_context_construction():\n sl = _ir.SourceLocation(\"foo\", \"bar.py\", 10)\n di = _ir.DebugInfo(_ir.DebugContext(), \"test\")\n _ir.DebugContext(\"baz\", sl)\n _ir.DebugContext()\n _ir.DebugContext(\"baz\")\n _ir.DebugContext(di, \"baz\")", "def set_debug_mode(self):\n self.debug_mode = True", "def test_get_run(self):\n pass", "def TestOneStep(self):\n pass", "def debug():\n # \"EMBEDDED_MODE\" is True precisely when the Sage notebook is running.\n from sage.plot.plot import EMBEDDED_MODE\n if not EMBEDDED_MODE:\n # Must be the command line, so suggest using the IPython debugger.\n print(\"You should use %debug on the command line.\")\n else:\n # Create the Debug object and make it interactive.\n Debug().interact()", "def debug(target=None):\n logger.verbose(True)\n man = Manager()\n man.mode_dbg = True\n man.init_components(target)\n man.start_app()", "def is_debug ():\n\n return __debug__ and DEBUG", "def test_DDSim_runIt_success_Debug(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.SteeringFile = \"mySteering.py\"\n self.ddsim.NumberOfEvents = 123\n self.ddsim.debug = True\n ## side effect for Steering1a, Steering1b, Steering2, Script, userlibs, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, True, True, False, False, False, True] ) ):\n res = self.ddsim.runIt()\n assertDiracSucceeds( res, self )\n self.assertIn( \" --printLevel DEBUG \", self.ddsim.extraCLIarguments )", "def test_generate_all_testing(self):\n pass", "def test_verbose_debug_alt():\n output = subprocess.run(['smif', 'list', '--verbose', '--verbose'], stderr=subprocess.PIPE)\n assert 'DEBUG' in str(output.stderr)", "def __debug(msg):\n\n pass", "def test_debug_set_trace(self):\n self.set_trace()\n self.open_port.assert_called_with(4444)", "def set_debug(state):\n global _DEBUG\n _DEBUG = bool(state)", "def NeedsDebugInfo(self):\n return False", "def test_launch_deployment(self):\n pass", "def test_module(self):\n pass", "def test_quick_build(self):\n pass", "def debug():\n return exported_res_dict", "def __main() :\n launchTests()" ]
[ "0.76501894", "0.74378276", "0.7319192", "0.7256087", "0.7246349", "0.7120246", "0.7063667", "0.69600326", "0.6871428", "0.67112607", "0.6628233", "0.66059375", "0.65979975", "0.65336007", "0.6533267", "0.6531516", "0.6531516", "0.6531516", "0.65074044", "0.64774877", "0.64328605", "0.64283293", "0.64262956", "0.6396646", "0.6386452", "0.63722", "0.63703287", "0.6363118", "0.6343802", "0.63295454", "0.63294584", "0.6326141", "0.6320935", "0.63106596", "0.6297768", "0.6291895", "0.62903905", "0.62024134", "0.61947507", "0.6193942", "0.6177065", "0.61716354", "0.61625916", "0.61491835", "0.614786", "0.6145998", "0.61371744", "0.612684", "0.612684", "0.6125092", "0.6124698", "0.6117574", "0.6110944", "0.6108451", "0.60989225", "0.60989225", "0.60989225", "0.60989225", "0.60989225", "0.60974944", "0.60907584", "0.605697", "0.60381985", "0.6023879", "0.6022367", "0.602053", "0.60179406", "0.60088664", "0.6008288", "0.6002253", "0.5994933", "0.59909433", "0.59761804", "0.5975665", "0.59673995", "0.5960262", "0.5951993", "0.5951993", "0.59362036", "0.5926176", "0.59226537", "0.5915886", "0.59127444", "0.5912118", "0.5906343", "0.58932346", "0.5888482", "0.5888001", "0.58829385", "0.5882515", "0.58808345", "0.58662885", "0.58636266", "0.5862228", "0.58554983", "0.5853309", "0.5850833", "0.5850788", "0.5844053", "0.5841325", "0.5841052" ]
0.0
-1
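The test in the record above leans on a project-specific idxgz.load helper to unpack the gzipped MNIST IDX files and print their dtype and shape. A minimal stand-in reader for that file format is sketched below, assuming the standard IDX header layout; the name load_idx_gz and the dtype table are illustrative, not the project's actual API.

import gzip
import numpy as np

# IDX type codes -> numpy dtypes (per the standard IDX header layout).
_IDX_DTYPES = {0x08: np.uint8, 0x09: np.int8, 0x0B: np.int16,
               0x0C: np.int32, 0x0D: np.float32, 0x0E: np.float64}

def load_idx_gz(path):
    # IDX files start with two zero bytes, a dtype code and the number of
    # dimensions, followed by one big-endian 32-bit size per dimension.
    with gzip.open(path, "rb") as f:
        raw = f.read()
    dtype_code, ndim = raw[2], raw[3]
    shape = tuple(int.from_bytes(raw[4 + 4 * i: 8 + 4 * i], "big")
                  for i in range(ndim))
    dtype = np.dtype(_IDX_DTYPES[dtype_code]).newbyteorder(">")
    data = np.frombuffer(raw, dtype=dtype, offset=4 + 4 * ndim)
    return data.reshape(shape)

# e.g. load_idx_gz("train-images-idx3-ubyte.gz").shape -> (60000, 28, 28)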
Extracts (typically) overlapping regular patches from a grayscale image. Changing the offset and stride parameters will result in images reconstructed by reconstruct_from_grayscale_patches having different dimensions! Callers should pad and unpad as necessary!
def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ):\n    px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0]))\n    l, t = np.meshgrid(\n        np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]),\n        np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) )\n    l = l.ravel()\n    t = t.ravel()\n    x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1]))\n    y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1]))\n    return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l)
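A short usage sketch for the helper above, paired with the reconstruct_from_grayscale_patches function that appears among the negatives below; the 64x64 image and the 8x8 patch / stride-5 settings are arbitrary, chosen only to show the dimension mismatch the docstring warns about.

import numpy as np

img = np.random.rand(64, 64)  # grayscale image
patches, origin = extract_grayscale_patches(img, (8, 8), stride=(5, 5))
recon, weights = reconstruct_from_grayscale_patches(patches, origin)

# With 8x8 patches and stride (5, 5) the last row/column of the 64x64 input is
# never covered, so the reconstruction comes back 63x63 -- hence the advice to
# pad the input first and crop ("unpad") the result afterwards.
print(img.shape, patches.shape, recon.shape)  # (64, 64) (144, 8, 8) (63, 63)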
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n p = tf.reshape(p, [n_crops, -1, patch_size * patch_size * c])\n\n count_h = _ceil_divide_int(h, patch_stride)\n count_w = _ceil_divide_int(w, patch_stride)\n\n # Shape (num_patches, 1)\n spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)\n # Shape (1, num_patches, 1)\n spatial_p = tf.expand_dims(spatial_p, axis=0)\n # Shape (n_crops, num_patches, 1)\n spatial_p = tf.tile(spatial_p, (n_crops, 1, 1))\n spatial_p = tf.cast(spatial_p, dtype=p.dtype)\n # Shape (n_crops, num_patches, 1)\n scale_p = tf.ones_like(spatial_p, dtype=p.dtype) * scale_id\n # Shape (n_crops, num_patches, 1)\n mask_p = tf.ones_like(spatial_p, dtype=p.dtype)\n\n # Concatenating is a hacky way to pass both patches, positions and input\n # mask to the model.\n # Shape (n_crops, num_patches, patch_size * patch_size * c + 3)\n out = tf.concat([p, spatial_p, scale_p, mask_p], axis=2)\n if max_seq_len >= 0:\n out = _pad_or_cut_to_max_seq_len(out, max_seq_len)\n out = tf.reshape(out,\n [n_crops, max_seq_len, c * patch_size * patch_size + 3])\n else:\n out = tf.reshape(out, [n_crops, -1, c * patch_size * patch_size + 3])\n return out", "def get_patches(rimage, gimage, mimage, num_patches=48, patch_size=80, patch_stride=80):\n num_FSpatches = 16\n num_RApatches = 32\n rpatches = []\n gpatches = []\n mpatches = []\n #R_imgs = ((rimage+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'rainy.jpg', R_imgs[0,:,:,:])\n for i in range(int(math.sqrt(num_FSpatches))):\n for j in range(int(math.sqrt(num_FSpatches))):\n point_x = patch_stride*i\n point_y = patch_stride*j\n rpatch = rimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n #print(point_x)\n #print(point_y)\n #print(point_y+patch_size)\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d_%d.jpg'%(i,j), P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n mpatches.append(mpatch)\n\n for k in range(num_RApatches):\n point1 = random.randint(0,240) # 116 comes from the image source size (320) - the patch dimension (80)\n point2 = random.randint(0,240)\n #rpatch = tf.image.crop_to_bounding_box(rimage, point1, point2, patch_size, patch_size)\n rpatch = rimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d.jpg'%i, P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n mpatches.append(mpatch)\n\n rpatches = np.array(rpatches)\n rpatches = np.squeeze(rpatches)\n #print(rpatches.shape)\n gpatches = np.array(gpatches)\n gpatches = np.squeeze(gpatches)\n mpatches = np.array(mpatches)\n mpatches = np.squeeze(mpatches)\n #assert rpatches.get_shape().dims == [num_patches, patch_size, patch_size, 3]\n assert 
rpatches.shape == (num_patches, patch_size, patch_size, 3)\n return rpatches, gpatches, mpatches", "def recreate_from_patches(data):\n overlap_height = (PATCHES * PATCH_HEIGHT - IMG_HEIGHT) // (PATCHES - 1) # Overlap of patches along y axis\n step_size_height = PATCH_HEIGHT - overlap_height # Step size along y axis\n\n overlap_width = (PATCHES * PATCH_WIDTH - IMG_WIDTH) // (PATCHES - 1) # Overlap of patches along x axis\n step_size_width = PATCH_WIDTH - overlap_width # Step size along x axis\n\n whole_images = []\n i = 0\n while i < len(data):\n image = np.zeros((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)) # Create an empty image to pin patches on\n\n for h in range(PATCHES - 1):\n for w in range(PATCHES - 1):\n # Insert patches into image starting from top left corner, without the patches touching right or bottom border\n if h > 0: # First row has no overlap with patches above them\n if overlap_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_height], 0.5,\n data[i - PATCHES][step_size_height:], 0.5, 0)\n\n # Insert into patch where it overlaps\n rest = data[i][overlap_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n\n if w == PATCHES - 2: # If we are at the second to last patch, overlap may be calculated different\n i += 1\n continue\n\n else:\n i += 1\n if overlap_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_width)]], 0.5,\n data[i - 1][:,\n [i for i in range(PATCH_WIDTH - overlap_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert into next patch\n rest = data[i][:, [i for i in range(overlap_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch which touches right border on this height, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert array of overlap into patch, where it overlaps\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n i += 1\n\n for w in range(PATCHES - 1):\n # Insert patches from the bottom border, may overlap more\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n IMG_HEIGHT - PATCH_HEIGHT / 2))\n 
i += 1\n\n # Insert patch in the bottom right corner, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap along x axis with mean values form overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n\n # Insert array of overlap into patch\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2, IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n whole_images.append(\n image) # All corresponding patches are pinned inside the image, therefore this image is finished\n\n return whole_images", "def extract_patches(image, patchshape, overlap_allowed=0.1, cropvalue=None, crop_fraction_allowed=0.1):\r\n jump_cols = int(patchshape[1] * overlap_allowed)\r\n jump_rows = int(patchshape[0] * overlap_allowed)\r\n\r\n # Restrict ourselves to the rectangle containing non-cropped pixels\r\n if cropvalue is not None:\r\n rows, cols = np.where(image != cropvalue)\r\n rows.sort()\r\n cols.sort()\r\n active = image[rows[0]:rows[-1], cols[0]:cols[-1]]\r\n else:\r\n active = image\r\n\r\n rowstart = 0\r\n colstart = 0\r\n\r\n # Array tracking where we've already taken patches.\r\n covered = np.zeros(active.shape, dtype=bool)\r\n patches = []\r\n regions = []\r\n while rowstart <= active.shape[0] - patchshape[0]:\r\n # Record whether or not e've found a patch in this row,\r\n # so we know whether to skip ahead.\r\n got_a_patch_this_row = False\r\n colstart = 0\r\n while colstart <= active.shape[1] - patchshape[1]:\r\n # Slice tuple indexing the region of our proposed patch\r\n region = (slice(rowstart, rowstart + patchshape[0]),\r\n slice(colstart, colstart + patchshape[1]))\r\n\r\n # The actual pixels in that region.\r\n patch = active[region]\r\n\r\n # The current mask value for that region.\r\n cover_p = covered[region]\r\n if cropvalue is None or \\\r\n frac_eq_to(patch, cropvalue) <= crop_fraction_allowed and \\\r\n frac_eq_to(cover_p, True) <= overlap_allowed:\r\n # Accept the patch.\r\n patches.append(patch)\r\n regions.append(region)\r\n # Mask the area.\r\n covered[region] = True\r\n\r\n # Jump ahead in the x direction.\r\n colstart += jump_cols\r\n got_a_patch_this_row = True\r\n # print \"Got a patch at %d, %d\" % (rowstart, colstart)\r\n else:\r\n # Otherwise, shift window across by one pixel.\r\n colstart += 1\r\n\r\n if got_a_patch_this_row:\r\n # Jump ahead in the y direction.\r\n rowstart += jump_rows\r\n else:\r\n # Otherwise, shift the window down by one pixel.\r\n rowstart += 1\r\n\r\n # Return a 3D array of the patches with the patch index as the first\r\n # dimension (so that patch pixels stay contiguous in memory, in a\r\n # C-ordered array).\r\n return 
np.concatenate([pat[np.newaxis, ...] for pat in patches], axis=0),regions", "def reconstruct_from_grayscale_patches( patches, origin, epsilon=1e-12 ):\n patch_width = patches.shape[2]\n patch_height = patches.shape[1]\n img_width = np.max( origin[1] ) + patch_width\n img_height = np.max( origin[0] ) + patch_height\n\n out = np.zeros( (img_height,img_width) )\n wgt = np.zeros( (img_height,img_width) )\n for i in range(patch_height):\n for j in range(patch_width):\n out[origin[0]+i,origin[1]+j] += patches[:,i,j]\n wgt[origin[0]+i,origin[1]+j] += 1.0\n\n return out/np.maximum( wgt, epsilon ), wgt", "def image_to_patches(image, patch_size=8, overlap=False, is_mask=False):\n H, W = np.shape(image)\n num_patches = (\n (H - patch_size + 1) * (W - patch_size + 1)\n if overlap\n else int(H / patch_size) * int(W / patch_size)\n )\n patches = (\n np.zeros((patch_size ** 2, patch_size ** 2, num_patches))\n if is_mask\n else np.zeros((patch_size ** 2, num_patches))\n )\n overlap_step = 1 if overlap else patch_size\n count = 0\n for i in np.arange(H - patch_size + 1, step=overlap_step):\n for j in np.arange(W - patch_size + 1, step=overlap_step):\n if is_mask:\n patches[:, :, count] = np.diag(\n np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n )\n else:\n patches[:, count] = np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n count += 1\n return patches", "def divide_image_to_patches(img, patch_size, stride=None):\n\n stride = stride or patch_size\n if not 0 < stride <= patch_size:\n raise ValueError(\n 'stride should be positive and smaller than or equal to patch_size')\n\n if len(img.shape) == 2: # this is a mask\n img = np.expand_dims(img, -1)\n\n height, width, n_channels = img.shape\n\n # Sometimes we need to extend the original image so that the sliding window\n # won't move out of the image\n ext_height, ext_width = _get_extended_image_size(\n height, width, patch_size, stride)\n ext_img = np.zeros((ext_height, ext_width, n_channels))\n ext_img[:height, :width] = img\n\n x = []\n\n for i in range(0, ext_height - patch_size + 1, stride):\n for j in range(0, ext_width - patch_size + 1, stride):\n x.append(ext_img[i:i + patch_size, j:j + patch_size, :])\n\n return np.array(x).astype('uint8')", "def extract_patches(data,patch_dim):\n \n m = data.shape[0]\n im_x = data.shape[1]\n im_y = data.shape[2]\n \n assert im_x%float(patch_dim)==0 and im_y%float(patch_dim)==0, \\\n \"patch_size must divide x and y dimensions of image\"\n\n numpatchs = m*(im_x/patch_dim)*(im_y/patch_dim)\n patch_size = patch_dim**2\n\n patches = np.empty((patch_size,numpatchs))\n p=0\n for i in range(data.shape[0]):\n image = data[i,...]\n for x in np.r_[0:im_x:patch_dim]:\n for y in np.r_[0:im_y:patch_dim]:\n patch = image[x:x+patch_dim,y:y+patch_dim]\n patches[:,p] = patch.ravel()\n p+=1\n \n return patches", "def create_patches_from_mask(image, mask, patchSize=32, pad=32, depth=1, searchSlices=None):\n rois = []\n images = []\n labels = []\n searchSlices = range(len(mask)) if searchSlices is None else searchSlices\n for i in searchSlices:\n # For each voxel, generate a ROI centered there\n if not np.any(mask[i]):\n continue\n xS, yS = np.nonzero(mask[i, :, :])\n xS -= xS % patchSize\n yS -= yS % patchSize\n allPatches = set(zip(xS, yS))\n for x, y in allPatches:\n patch = np.copy(\n # agafem el patch que ens interessa i agafem un contorn per si de cas (padding)\n # potser seria interessant reduir el padding (la quantitat de marge que deixem)\n # ara mateix tenim patches de 96, quan ens interessa 
el centre de 32 d'aquests\n image[i - depth: i + 1 + depth, x - pad:x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n label = np.copy(\n # quan fem rotacio al fer data augmentation, ens volem assegurar d'estar treballant amb\n # el mateix\n mask[i: i + 1, x - pad: x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n\n rois.append(np.array([x, y, i]))\n images.append(patch)\n labels.append(label)\n return rois, images, labels", "def divide_image_to_patches(img, patch_size):\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, n_channels = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')", "def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] > 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - 
int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)", "def img_to_patches(img, patch_size, stride, overlapping=True):\r\n h, w, _ = img.shape\r\n\r\n assert h == w, 'height should be equal to width ({} != {})'.format(h, w)\r\n assert overlapping or patch_size % stride == 0, 'cannot have non overlapping patches with {} % {} != 0' \\\r\n .format(patch_size, stride)\r\n assert (h - patch_size) % stride == 0, 'height - patch_size should be dividable by stride but {} % {} != 0' \\\r\n .format(h - patch_size, stride)\r\n\r\n n_stride = (h - patch_size) // stride + 1\r\n patches = []\r\n for i in range(n_stride):\r\n if overlapping or i * stride % patch_size == 0:\r\n for j in range(n_stride):\r\n if overlapping or j * stride % patch_size == 0:\r\n patch = img[i * stride: i * stride + patch_size, j * stride: j * stride + patch_size]\r\n patches.append(patch)\r\n return np.array(patches)", "def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n 
patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches", "def patches_to_img(patches, stride, img_shape):\r\n if len(img_shape) > 2:\r\n channels = [patches_to_img(patches[:, :, :, i], stride, img_shape[:2]) for i in range(3)]\r\n return np.concatenate(channels, axis=2)\r\n\r\n h, w = img_shape\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n\r\n assert h == w, \"only squared image are accepted\"\r\n assert (h - patch_size) % stride == 0, \"The stride must be adapted on image and patch size\"\r\n assert len(patches) == n_stride ** 2, \"They must be the right number of patches per image\"\r\n\r\n pred_final = np.zeros(img_shape + (1,)) # Accumulator for the final prediction\r\n pred_normalizer = np.zeros(img_shape + (1,)) # Counter of the patch per prediction per pixel\r\n\r\n for i in range(n_stride):\r\n for j in range(n_stride):\r\n x_from, x_to = i * stride, i * stride + patch_size\r\n y_from, y_to = j * stride, j * stride + patch_size\r\n idx = i * n_stride + j\r\n pred_final[x_from: x_to, y_from: y_to] += patches[idx].reshape(patch_size, patch_size, 1)\r\n pred_normalizer[x_from: x_to, y_from: y_to] += 1\r\n return pred_final / pred_normalizer", "def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices", "def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx 
in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore", "def _extract_patches(img, patch_s):\n def np_extract_patches(img):\n orig = np.array(img.shape[:2])\n new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)\n points = new - orig\n img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],\n mode='constant')\n patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)\n patches = patches.reshape(-1, *patch_s)\n return patches\n\n patches = tf.numpy_function(np_extract_patches, [img], tf.float32)\n return patches", "def prepare_train_patches(images_path, labels_path, indices, patch_size, overlap, overlap_amount, aug_config):\n\n # Load images and labels\n images = extract_images(images_path, indices)\n labels = extract_images(labels_path, indices)\n\n # Get patches\n if overlap:\n image_patches = [patch for im in images for patch in patchify_overlap(im, patch_size, overlap_amount)]\n label_patches = [patch for label in labels for patch in patchify_overlap(label, patch_size, overlap_amount)]\n else:\n image_patches = [patch for im in images for patch in patchify(im, patch_size)]\n label_patches = [patch for label in labels for patch in patchify(label, patch_size)]\n \n if not aug_config:\n return image_patches, label_patches\n\n patches = zip(image_patches, label_patches)\n\n # Rotation needs to be applied on whole image\n if aug_config.do_rotation:\n images_rot = rotate_images(images, aug_config.rotation_angles)\n labels_rot = rotate_images(labels, aug_config.rotation_angles)\n\n for im, label in zip(images_rot, labels_rot):\n p = patchify_no_corner(im, label, patch_size, overlap, overlap_amount)\n image_patches.extend(p[0])\n label_patches.extend(p[1])\n\n # Flip each patch horizontally\n images_flipped = []\n labels_flipped = []\n if aug_config.do_flip:\n flip_hor = iaa.Fliplr(0.5).to_deterministic()\n flip_ver = iaa.Flipud(0.5).to_deterministic()\n images_flipped.extend(flip_hor.augment_images(image_patches))\n images_flipped.extend(flip_ver.augment_images(image_patches))\n labels_flipped.extend(flip_hor.augment_images(label_patches))\n labels_flipped.extend(flip_ver.augment_images(label_patches))\n\n image_patches.extend([im.copy() for im in images_flipped])\n label_patches.extend([im.copy() for im in labels_flipped])\n\n # For all the patches (even new ones), augment channels\n if aug_config.augment_channels:\n image_patches = augment_channels(image_patches, aug_config)\n\n return image_patches, label_patches", "def extract_patch(n, patch_size, imgs):\n # Extract patches from input images\n img_patches = [img_crop(imgs[i], patch_size, patch_size) for i in range(n)]\n #gt_patches = [img_crop(gt_imgs[i], patch_size, patch_size) for i in range(n)]\n\n # Linearize list of patches\n img_patches = np.asarray([img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))])\n #gt_patches = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n \n return img_patches #,gt_patches", "def combine_patches_to_image(y_pred, img, stride):\n\n counter = 0\n height, width = img.shape[:2]\n output_size = 
y_pred.shape[1]\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((height, width, y_pred.shape[-1] + 1))\n\n for i in range(0, height - output_size + 1, stride):\n for j in range(0, width - output_size + 1, stride):\n patch = combined[i:i + output_size, j:j + output_size, :-1]\n overlaps = combined[i:i + output_size, j:j + output_size, -1:]\n patch = (patch * overlaps + y_pred[counter]) / (overlaps + 1)\n combined[i:i + output_size, j:j + output_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return combined[:height, :width, :-1]", "def get_patches(image_mat, stride):\n window_shape = (128, 128, 3)\n windows = view_as_windows(image_mat, window_shape, step=stride)\n patches = []\n for m in range(windows.shape[0]):\n for n in range(windows.shape[1]):\n patches += [windows[m][n][0]]\n return patches", "def img_to_patches(img, win, stride=1):\n k = 0\n endc = img.shape[0]\n endw = img.shape[1]\n endh = img.shape[2]\n if endw<win or endh<win:\n return np.zeros([endc,win,win,0])\n patch = img[:, 0:endw-win+0+1:stride, 0:endh-win+0+1:stride]\n total_pat_num = patch.shape[1] * patch.shape[2]\n res = np.zeros([endc, win*win, total_pat_num], np.float32)\n for i in range(win):\n for j in range(win):\n patch = img[:, i:endw-win+i+1:stride, j:endh-win+j+1:stride]\n res[:, k, :] = np.array(patch[:]).reshape(endc, total_pat_num)\n k = k + 1\n return res.reshape([endc, win, win, total_pat_num])", "def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes", "def extract_patches_single_scale(\n patch_size: int,\n stride: int,\n image_lt: lt.LabeledTensor,\n name: str = None,\n) -> Tuple[np.ndarray, lt.LabeledTensor]:\n with tf.compat.v1.name_scope(name, \"extract_patches_single_scale\", [image_lt]) as scope:\n image_lt = lt.transpose(image_lt, [\"batch\", \"row\", \"column\", \"channel\"])\n image_lt = tensorcheck.bounds(0.0, 1.0, image_lt)\n\n logging.info(\"extract_patches_single_scale: Input axes: %s\", image_lt.axes)\n\n batch_size = len(image_lt.axes[\"batch\"])\n num_rows = len(image_lt.axes[\"row\"])\n num_columns = len(image_lt.axes[\"column\"])\n\n row_offsets = range(0, num_rows - patch_size + 1, stride)\n if not row_offsets:\n raise ValueError(\"num_rows - patch_size + 1 must be >= 1\")\n expected_num_rows = _num_extracted_rows_and_columns(num_rows, patch_size,\n stride, 1, 2)\n assert len(row_offsets) == expected_num_rows, (len(row_offsets),\n expected_num_rows,\n (num_rows, patch_size,\n stride))\n\n column_offsets = range(0, num_columns - patch_size + 1, stride)\n assert column_offsets\n expected_num_columns = _num_extracted_rows_and_columns(\n num_columns, patch_size, stride, 1, 2)\n assert len(column_offsets) == expected_num_columns, (len(column_offsets),\n expected_num_columns,\n 
(num_rows, patch_size,\n stride))\n\n offsets = [(r, c) for r in row_offsets for c in column_offsets]\n\n patch_lts = []\n for b in range(batch_size):\n for (row, column) in offsets:\n patch_lt = lt.slice(\n image_lt, {\n \"batch\": slice(b, b + 1),\n \"row\": slice(row, row + patch_size),\n \"column\": slice(column, column + patch_size)\n })\n patch_lts.append(patch_lt)\n\n pack_lt = lt.concat(patch_lts, \"batch\")\n reshape_lt = lt.reshape(pack_lt, [\"batch\"], [\n image_lt.axes[\"batch\"], (\"patch_row\", len(row_offsets)),\n (\"patch_column\", len(column_offsets))\n ])\n\n reshape_lt = tensorcheck.shape(reshape_lt)\n reshape_lt = tensorcheck.bounds(0.0, 1.0, reshape_lt, name=scope)\n\n centers = [\n (r + patch_size / 2.0, c + patch_size / 2.0) for (r, c) in offsets\n ]\n\n logging.info(\"extract_patches_single_scale: Output axes: %s\",\n reshape_lt.axes)\n\n return np.array(centers), reshape_lt", "def get_patches_non_overlap(array, patch_height, patch_width): \n total_patches_in_height = array.shape[0]//patch_height\n total_patches_in_width = array.shape[1]//patch_width\n # print(\"total patches in height from supplied image array : {}\".format(total_patches_in_height))\n # print(\"total patches in width from supplied image array : {}\".format(total_patches_in_width))\n \n total_patches = total_patches_in_height * total_patches_in_width\n # print(\"total patches from supplied image array : {}\".format(total_patches))\n patches = np.empty(shape=(total_patches, 1, patch_height, patch_width), dtype=np.uint8)\n \n patch_no = 0\n for i in range(0, array.shape[0], patch_height):\n for j in range(0, array.shape[1], patch_width):\n if (i+patch_height <= array.shape[0]+1) and (j+patch_width <= array.shape[1]+1):\n patches[patch_no, 0, :, :] = array[i:i+patch_height, j:j+patch_width]\n patch_no += 1\n return patches", "def generate_patches_from_img(img, patch_size=128):\n\n new_width, new_height, channels = img.shape\n\n if img.shape[0] % 128 != 0:\n new_width = img.shape[0] + (128 - img.shape[0] % 128)\n\n if img.shape[1] % 128 != 0:\n new_height = img.shape[1] + (128 - img.shape[1] % 128)\n\n resized_img = resize(img, (new_width, new_height))\n\n block_shape = (128, 128, 3)\n img_blocks = view_as_blocks(resized_img, block_shape=block_shape)\n\n img_patches = {}\n\n for r in range(img_blocks.shape[0]):\n for c in range(img_blocks.shape[1]):\n img = img_blocks[r, c]\n img = np.reshape(img, (128, 128, 3))\n img_patches[(r, c)] = img\n\n return img_patches", "def extract_patches(image_list, mask_src, image_src, mask_dst, image_dst, patch_size):\n class_counts = defaultdict(lambda: 0)\n skipped = 0\n total = 0\n for im in tqdm(image_list):\n img = cv2.imread(os.path.join(image_src, im))\n msk = cv2.imread(os.path.join(mask_src, im), 0)\n \n assert (img.shape[0] == msk.shape[0]) \\\n and (img.shape[1] == msk.shape[1]), \"Mismatch!\"\n\n img_patches = patchify(img, (patch_size, patch_size, 3), step=patch_size)\n msk_patches = patchify(msk, (patch_size, patch_size), step=patch_size)\n img_patches = img_patches.reshape((-1, patch_size, patch_size, 3))\n msk_patches = msk_patches.reshape((-1, patch_size, patch_size))\n # Step = 256 for patch size means no overlap\n for i in range(img_patches.shape[0]):\n # Replace class labels\n mask_patch = replace_classes(msk_patches[i])\n unique, counts = np.unique(mask_patch, return_counts=True)\n # If outside of RoI takes > 90% and there is only 1 class, ignore the patch.\n outside = np.mean(mask_patch == 0) > 0.9\n if outside and (len(unique) < 2):\n skipped += 
1\n continue\n for x, y in enumerate(unique):\n class_counts[y] += counts[x].item()\n img_patch = img_patches[i]\n filename = im.split(\".png\")[0] + \"_\" + str(i) + \".png\"\n cv2.imwrite(os.path.join(image_dst, filename), img_patch)\n cv2.imwrite(os.path.join(mask_dst, filename), mask_patch)\n total += 1\n print('Skipped: {} / {}'.format(skipped, total))\n return class_counts", "def extract_patch_from_img(array, patch_index, patch_size, z_offset=0, mean=None, std=None):\n patch_index[0] -= z_offset\n patch_index[1] -= z_offset\n\n z, x, y = array.shape\n ww = [patch_size[0], patch_size[1], patch_size[2]]\n\n ret = np.zeros(ww)\n temp_patch_index = np.array(patch_index).copy()\n ww = [0, patch_size[0], 0, patch_size[1], 0, patch_size[2]]\n\n # if patch overlaps image boundry (needs 0 padding) offset image index\n if temp_patch_index[0] < 0:\n ww[0] -= temp_patch_index[0]\n temp_patch_index[0] = 0\n if temp_patch_index[2] < 0:\n ww[2] -= temp_patch_index[2]\n temp_patch_index[2] = 0\n if temp_patch_index[4] < 0:\n ww[4] -= temp_patch_index[4]\n temp_patch_index[4] = 0\n\n if temp_patch_index[1] > z:\n ww[1] -= temp_patch_index[1] - z\n temp_patch_index[1] = z\n if temp_patch_index[3] > x:\n ww[3] -= temp_patch_index[3] - x\n temp_patch_index[3] = x\n if temp_patch_index[5] > y:\n ww[5] -= temp_patch_index[5] - y\n temp_patch_index[5] = y\n if temp_patch_index[0] >= temp_patch_index[1]:\n temp_patch_index[0] = temp_patch_index[1] - 1\n\n insert = array[temp_patch_index[0]:temp_patch_index[1],\n temp_patch_index[2]:temp_patch_index[3],\n temp_patch_index[4]:temp_patch_index[5]]\n\n # normalize patch\n if not (mean is None or std is None):\n insert = np.divide(insert - mean, std)\n\n ret[ww[0]:ww[1], ww[2]:ww[3], ww[4]:ww[5]] = insert\n\n return ret", "def extract_image_patches(images, ksizes, strides, rates, padding='same'):\n assert len(images.size()) == 4\n assert padding in ['same', 'valid']\n batch_size, channel, height, width = images.size()\n\n if padding == 'same':\n images = same_padding(images, ksizes, strides, rates)\n elif padding == 'valid':\n pass\n else:\n raise NotImplementedError('Unsupported padding type: {}.\\\n Only \"same\" or \"valid\" are supported.'.format(padding))\n\n unfold = torch.nn.Unfold(kernel_size=ksizes,\n dilation=rates,\n padding=0,\n stride=strides)\n patches = unfold(images)\n return patches # [N, C*k*k, L], L is the total number of such blocks", "def iter_patch(\n arr: NdarrayOrTensor,\n patch_size: Sequence[int] | int = 0,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n copy_back: bool = True,\n mode: str | None = NumpyPadMode.WRAP,\n **pad_opts: dict,\n) -> Generator[tuple[NdarrayOrTensor, np.ndarray], None, None]:\n\n from monai.transforms.croppad.functional import pad_nd # needs to be here to avoid circular import\n\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # set padded flag to false if pad mode is None\n padded = bool(mode)\n is_v = [bool(p) for p in ensure_tuple_size(patch_size, arr.ndim)] # whether a valid patch size provided\n _pad_size = tuple(p if v and padded else 0 for p, v in zip(patch_size_, is_v)) # pad p if v else 0\n _overlap = [op if v else 0.0 for op, v in zip(ensure_tuple_rep(overlap, arr.ndim), is_v)] # overlap if v else 0.0\n # pad image by maximum values needed to ensure patches are taken from inside an image\n if padded:\n arrpad = pad_nd(arr, to_pad=[(p, p) for p in 
_pad_size], mode=mode, **pad_opts) # type: ignore\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, _pad_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, _pad_size))\n else:\n arrpad = arr\n start_pos_padded = start_pos\n iter_size = arr.shape\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded, _overlap, padded=padded):\n # compensate original image padding\n if padded:\n coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, _pad_size))\n else:\n coords_no_pad = tuple((coord.start, coord.stop) for coord in slices)\n yield arrpad[slices], np.asarray(coords_no_pad) # data and coords (in numpy; works with torch loader)\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(_pad_size, arr.shape))\n arr[...] = arrpad[slices] # type: ignore", "def create_image_patches(img, size=512):\n\n patch_list = []\n width, height = img.shape[1], img.shape[0]\n w, h = size, size\n for y in range(0, height, h):\n y_end = min(y + h, width)\n for x in range(0, width, w):\n x_end = min(x + w, height)\n patch = img[y:y_end, x:x_end]\n patch_list.append(patch)\n return patch_list", "def image_patch(self, image):\n height, width = image.shape\n self.current_image = image\n H_out, W_out = output_shape(height, width, self.filter_size, self.padding, self.stride)\n for j in range(H_out):\n for k in range(W_out):\n image_patch = image[j*self.stride : (j*self.stride + self.filter_size), k*self.stride:(k*self.stride+self.filter_size)]\n yield image_patch, j, k", "def extract_patches_loop(arr, patch_shape, sub_findings):\n arr_row, arr_col = arr.shape\n patch_row, patch_col = patch_shape\n\n # Extract findings and patches\n findings = sub_findings[:,:2]\n patches = image.extract_patches_2d(arr, patch_shape)\n\n # Patches per row/column\n ppc = arr_row - patch_row + 1\n ppr = arr_col - patch_col + 1\n\n # Mapping between indexing\n i2c = lambda idx: (idx // ppr, idx % ppr)\n c2i = lambda xs: xs[0] * ppr + xs[1]\n iden = lambda j: c2i(i2c(j))\n funcx = lambda fx: np.arange(patch_row) + (fx - patch_row + 1)\n funcy = lambda fy: np.arange(patch_col) + (fy - patch_col + 1)\n\n # Extract patches with findings\n idx = findings_2_idx(findings, c2i, funcx, funcy)\n mask = ~np.ones(patches.shape[0], dtype = bool)\n mask[idx] = True\n\n patches_with_findings = filter_empty_patches(patches[mask])\n patches_without_findings = filter_empty_patches(patches[~mask])\n\n return patches_without_findings, patches_with_findings", "def _sample_patches(imgs, \n labelimgs, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=0):\n samplelist = []\n \n # number of bands should be constant, therefore the dimensionality can be read from any \n # sub img\n bands = imgs[0].shape[-1]\n\n # calculate remapping for labels when removing `ignore_labels`\n # flatten labelimgs and convert to numpy array to use np.unique function on it\n flattened_labelimgs = np.concatenate([labelimg.reshape(-1) for labelimg in labelimgs])\n max_label = np.unique(flattened_labelimgs).max()\n remaining_labels = np.setdiff1d(np.arange(max_label+1), ignore_labels)\n label_remap = np.full((max_label+1), -1)\n for i, val in enumerate(remaining_labels):\n label_remap[val] = i\n\n valid_sample_count = 0\n 
for labelimg in labelimgs:\n valid_sample_count += np.invert(np.isin(labelimg, ignore_labels)).sum()\n print(f'Extracting {valid_sample_count} valid samples...')\n \n if ('data' in patchgroup) and ('labels' in patchgroup):\n # resize existing dataset to append patches from test set\n patchgroup['data'].resize((patchgroup['data'].shape[0] + valid_sample_count), axis=0)\n patchgroup['labels'].resize((patchgroup['labels'].shape[0] + valid_sample_count), axis=0)\n else:\n patchgroup.create_dataset('data', (valid_sample_count, patch_size, patch_size, bands)\n , chunks=(1, patch_size, patch_size, bands)\n , maxshape=(None, patch_size, patch_size, bands)\n , dtype=imgs[0].dtype) # datatype should be the same for all imgs\n patchgroup.create_dataset('labels', (valid_sample_count,1)\n , chunks=True, maxshape=(None, 1)\n , dtype=labelimgs[0].dtype) # datatype should be the same for all labelimgs\n \n idx = startidx\n with tqdm(total=valid_sample_count) as pbar:\n for img, labelimg in zip(imgs, labelimgs):\n\n # pad along spatial axes\n margin = int((patch_size - 1) / 2)\n X = np.pad(img, ((margin, margin), (margin, margin), (0,0)), \n mode=padding_mode, constant_values=padding_values) \n\n # split patches\n for r in range(margin, X.shape[0] - margin):\n for c in range(margin, X.shape[1] - margin):\n patchlabel = labelimg[r-margin, c-margin]\n\n # do not create a sample for 'ignore_labels'\n if patchlabel in ignore_labels:\n continue\n else :\n # correct label\n patchlabel = label_remap[patchlabel]\n\n patch = X[r - margin:r + margin + 1, c - margin:c + margin + 1]\n # store sample in hdf file\n patchgroup['data'][idx] = patch\n patchgroup['labels'][idx] = patchlabel\n\n # update\n idx += 1\n pbar.update(1)\n\n patchgroup.attrs['patch_size'] = patch_size\n patchgroup.attrs['padding_mode'] = padding_mode\n patchgroup.attrs['padding_values'] = padding_values\n patchgroup.attrs['ignore_labels'] = ignore_labels\n\n return valid_sample_count", "def get_multiscale_patches(\n image,\n patch_size,\n patch_stride,\n hse_grid_size,\n longer_side_lengths,\n max_seq_len_from_original_res = None):\n # Sorting the list to ensure a deterministic encoding of the scale position.\n longer_side_lengths = sorted(longer_side_lengths)\n\n # Input channels.\n c = 3\n if len(image.get_shape().as_list()) == 3:\n n_crops = 1\n h, w = tf.shape(image)[0], tf.shape(image)[1]\n image = tf.expand_dims(image, axis=0)\n else:\n n_crops, h, w = (tf.shape(image)[0], tf.shape(image)[1], tf.shape(image)[2])\n\n outputs = []\n for scale_id, longer_size in enumerate(longer_side_lengths):\n resized_image, rh, rw = resize_preserve_aspect_ratio(\n image, h, w, longer_size)\n\n max_seq_len = int(np.ceil(longer_size / patch_stride)**2)\n out = _extract_patches_and_positions_from_image(resized_image, patch_size,\n patch_stride, hse_grid_size,\n n_crops, rh, rw, c,\n scale_id, max_seq_len)\n outputs.append(out)\n\n if max_seq_len_from_original_res is not None:\n out = _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size, n_crops, h, w, c,\n len(longer_side_lengths), max_seq_len_from_original_res)\n outputs.append(out)\n\n # Shape: (n_crops, num_total_patches, patch_size * patch_size * c + 3)\n outputs = tf.concat(outputs, axis=1)\n if n_crops == 1:\n # Shape: (num_total_patches, patch_size * patch_size * c + 3).\n # Training mode. 
4 dim wasn't handled by loss.\n outputs = outputs[0]\n return outputs", "def cut_image_strided(image, new_size):\n bands = image.shape[0]\n new_size_y, new_size_x = new_size\n old_size_y = image.shape[1]\n old_size_x = image.shape[2]\n nr_images_x = old_size_x // new_size[1]\n nr_images_y = old_size_y // new_size[0]\n if old_size_x % new_size_x != 0 or old_size_y % new_size_y != 0:\n print(\"The patch size is not a full multiple of the complete patch size\")\n\n return as_strided(image, shape=(nr_images_y, nr_images_x, bands, new_size_y, new_size_x),\n strides=(image.strides[1] * new_size_y, image.strides[2] * new_size_x, image.strides[0],\n image.strides[1], image.strides[2]))", "def patches_to_images(patches, stride, img_shape):\r\n h = img_shape[0]\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n assert len(patches) % n_stride ** 2 == 0, \"They must be the right number of patches per image\"\r\n\r\n n_images = len(patches) // (n_stride ** 2)\r\n\r\n images = []\r\n for i in range(n_images):\r\n n_patches = n_stride ** 2\r\n img = patches_to_img(patches[i * n_patches:(i + 1) * n_patches], stride, img_shape)\r\n images.append(img)\r\n\r\n return np.array(images)", "def image_mask(image, patch_R, patch_C, seg_model):\n\n im = Image.open(image)\n im_name = os.path.basename(image).split('.')[0]\n im_width, im_height = im.width, im.height\n\n N = patch_R // patch_C\n\n W_ps_NI = im_width // patch_C # 31782 // 256 = 124\n # W_ps_NR = slide_width % patch_C # 31782 % 256 = 38\n H_ps_NI = im_height // patch_R # 24529 // 1024 = 23\n # H_ps_NR = slide_height % patch_R # 24529 % 1024 = 977\n\n cell_ratio = 0.85 # the threshold that decide the patch is background or not\n\n output_dir = os.path.join(current_path, \"..\", \"output\", \"output_mask\")\n if not os.path.isdir(output_dir): os.makedirs(output_dir)\n\n np_im = np.array(im)[:, :, 0:3] # exclude alpha\n for w in range(W_ps_NI):\n for h in range(H_ps_NI):\n subHIC = np_im[h * patch_R: (h+1) * patch_R, w * patch_C:(w+1) * patch_C, :]\n\n # rgb three channels value that >200 and <40 are ignored segment\n rgb_s = (abs(subHIC[:, :, 0] - 120) >= 80) & (abs(subHIC[:, :, 1] - 120) >= 80) & (\n abs(subHIC[:, :, 2] - 120) >= 80) # >200 <40\n\n if np.sum(rgb_s) <= (patch_R * patch_C) * cell_ratio:\n # segment\n subHIC = np.where(rgb_similarity(subHIC, 15, 195), 250, subHIC)\n # adjust equalization histogram and adjust brightness\n for k in range(subHIC.shape[2]):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(N * 4, 4))\n subHIC[:, :, k] = clahe.apply(subHIC[:, :, k])\n subHIC = exposure.adjust_gamma(subHIC, gamma=1.5)\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n allmask_prob_list = maskrcnn_detection(seg_model, subHIC)\n\n for i in range(len(allmask_prob_list)):\n for layer in range(allmask_prob_list[i].shape[2]):\n image, cnts, hierarchy = cv2.findContours(allmask_prob_list[i][:, :, layer],\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n np_im[h * patch_R + i * patch_C: h * patch_R + (i + 1) * patch_C, w * patch_C:(w + 1) * patch_C,\n :] = cv2.drawContours(np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :],\n cnts, -1, (0, 255, 0), 1)\n\n # np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :] = subHIC[i]\n\n # plt.savefig(os.path.join(output_dir, f\"{im_name}w{w}h{h}N{i}.png\"))\n\n io.imsave(os.path.join(output_dir, f\"{im_name}.png\"), np_im)", "def sliding_window(image, 
patch_size: tuple, step: int, show_debug: bool = False) -> list:\n if isinstance(image, Image.Image):\n image = np.array(image)\n\n if step == 0:\n h, w = image.shape[0], image.shape[1] # 720, 1280\n w_iter, h_iter = w // patch_size[0], h // patch_size[1]\n crop_image_list = []\n for i in range(h_iter):\n for j in range(w_iter):\n bbox = (i*patch_size[0], j*patch_size[0],\n (i+1)*patch_size[0], (j+1)*patch_size[0])\n crop_image = image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n if show_debug:\n crop_image = Image.fromarray(crop_image)\n crop_image.save(f\"/data/jiangmingchao/patches/{i}.png\")\n cv2.rectangle(image,\n (i*patch_size[0], j*patch_size[0]),\n ((i+1)*patch_size[0], (j+1)*patch_size[0]),\n (255, 255, 0),\n 2,\n )\n\n crop_image_list.append(Image.fromarray(crop_image))\n\n if show_debug:\n cv2.imwrite(\"1.jpg\", image)\n\n else:\n h, w = image.shape[0], image.shape[1]\n step_w_iter, step_h_iter = (w - patch_size[0]) // step, (h - patch_size[0]) // step\n crop_image_list = []\n for i in range(step_h_iter):\n for j in range(step_w_iter):\n bbox = (i * step, j * step, patch_size[0] + i * step, patch_size[1] + j * step)\n crop_image = image[bbox[0]: bbox[2], bbox[1]: bbox[3]]\n print(crop_image.shape)\n crop_image_list.append(Image.fromarray(crop_image))\n\n return crop_image_list", "def sample_patches(images, psize=(8, 8), n=10000, remove_mean=True):\n d = psize[0] * psize[1]\n patches = np.zeros((d, n))\n standardized = grayscale_and_standardize(images, remove_mean)\n\n shapes = []\n for pic in standardized:\n shapes.append(pic.shape)\n\n rand_pic_num = np.random.randint(0, len(standardized), n)\n rand_x = np.random.rand(n)\n rand_y = np.random.rand(n)\n\n for i in range(n):\n pic_id = rand_pic_num[i]\n pic_shape = shapes[pic_id]\n x = int(np.ceil(rand_x[i] * (pic_shape[0] - psize[1])))\n y = int(np.ceil(rand_y[i] * (pic_shape[1] - psize[0])))\n patches[:, i] = np.reshape(np.ascontiguousarray(\n standardized[pic_id][x:x + psize[0], y:y + psize[1]]), d)\n\n return patches", "def dilationPatches2(rawPatches, dilationIter=20, borderWidth=1): # pixel width of the border after dilation\r\n\r\n total_area = ni.binary_dilation(rawPatches, iterations=dilationIter).astype(np.int)\r\n patchBorder = total_area - rawPatches\r\n\r\n # thinning patch borders\r\n patchBorder = sm.skeletonize(patchBorder)\r\n\r\n # thickening patch borders\r\n if borderWidth > 1:\r\n patchBorder = ni.binary_dilation(patchBorder, iterations=borderWidth - 1).astype(np.int)\r\n\r\n # genertating new patches\r\n newPatches = np.multiply(-1 * (patchBorder - 1), total_area)\r\n\r\n # removing small edges\r\n labeledPatches, patchNum = ni.label(newPatches)\r\n\r\n newPatches2 = np.zeros(newPatches.shape, dtype=np.int)\r\n\r\n for i in range(1, patchNum + 1):\r\n currPatch = np.zeros(labeledPatches.shape, dtype=np.int)\r\n currPatch[labeledPatches == i] = 1\r\n currPatch[labeledPatches != i] = 0\r\n\r\n if (np.sum(np.multiply(currPatch, rawPatches)[:]) > 0):\r\n # currPatch = ni.binary_closing(currPatch,\r\n # structure = np.ones((borderWidth+2,borderWidth+2))).astype(np.int)\r\n newPatches2[currPatch == 1] = 1\r\n\r\n return newPatches2", "def _get_chunk_patch_info(\n img_shape, chunk_input_shape, patch_input_shape, patch_output_shape\n):\n round_to_multiple = lambda x, y: np.floor(x / y) * y\n patch_diff_shape = patch_input_shape - patch_output_shape\n\n chunk_output_shape = chunk_input_shape - patch_diff_shape\n chunk_output_shape = round_to_multiple(\n chunk_output_shape, patch_output_shape\n ).astype(np.int64)\n 
chunk_input_shape = (chunk_output_shape + patch_diff_shape).astype(np.int64)\n\n patch_input_tl_list, _ = _get_patch_top_left_info(\n img_shape, patch_input_shape, patch_output_shape\n )\n patch_input_br_list = patch_input_tl_list + patch_input_shape\n patch_output_tl_list = patch_input_tl_list + patch_diff_shape\n patch_output_br_list = patch_output_tl_list + patch_output_shape\n patch_info_list = np.stack(\n [\n np.stack([patch_input_tl_list, patch_input_br_list], axis=1),\n np.stack([patch_output_tl_list, patch_output_br_list], axis=1),\n ],\n axis=1,\n )\n\n chunk_input_tl_list, _ = _get_patch_top_left_info(\n img_shape, chunk_input_shape, chunk_output_shape\n )\n chunk_input_br_list = chunk_input_tl_list + chunk_input_shape\n # * correct the coord so it stay within source image\n y_sel = np.nonzero(chunk_input_br_list[:, 0] > img_shape[0])[0]\n x_sel = np.nonzero(chunk_input_br_list[:, 1] > img_shape[1])[0]\n chunk_input_br_list[y_sel, 0] = (\n img_shape[0] - patch_diff_shape[0]\n ) - chunk_input_tl_list[y_sel, 0]\n chunk_input_br_list[x_sel, 1] = (\n img_shape[1] - patch_diff_shape[1]\n ) - chunk_input_tl_list[x_sel, 1]\n chunk_input_br_list[y_sel, 0] = round_to_multiple(\n chunk_input_br_list[y_sel, 0], patch_output_shape[0]\n )\n chunk_input_br_list[x_sel, 1] = round_to_multiple(\n chunk_input_br_list[x_sel, 1], patch_output_shape[1]\n )\n chunk_input_br_list[y_sel, 0] += chunk_input_tl_list[y_sel, 0] + patch_diff_shape[0]\n chunk_input_br_list[x_sel, 1] += chunk_input_tl_list[x_sel, 1] + patch_diff_shape[1]\n chunk_output_tl_list = chunk_input_tl_list + patch_diff_shape // 2\n chunk_output_br_list = chunk_input_br_list - patch_diff_shape // 2 # may off pixels\n chunk_info_list = np.stack(\n [\n np.stack([chunk_input_tl_list, chunk_input_br_list], axis=1),\n np.stack([chunk_output_tl_list, chunk_output_br_list], axis=1),\n ],\n axis=1,\n )\n\n return chunk_info_list, patch_info_list", "def create_patches(self, image):\n images = tf.expand_dims(image, axis=0)\n patches = tf.extract_image_patches(\n images,\n ksizes=[1, self.patch_h, self.patch_w, 1],\n strides=[1, self.strides_rows, self.strides_cols, 1],\n rates=[1, 1, 1, 1],\n padding='VALID',\n name=None\n )\n patches = tf.reshape(\n patches,\n (self.n_rows * self.n_cols, self.patch_h,\n self.patch_w, self.col_channels))\n return patches", "def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)", "def load_pixel_sparse(n_imgs=5, n_patches=100000, patch_x=4, patch_y=4):\n #n = np.random.randn(n_patches, patch_x*patch_y)\n #patches_unnorm = n**3\n #patches = patches_unnorm / np.std(patches_unnorm)\n patches = np.random.laplace(size=(n_patches, patch_x*patch_y))\n #patches = np.random.standard_cauchy(size=(n_patches, patch_x*patch_y))\n W_X = np.eye(patch_x*patch_y)\n # DEBUG why is this different from what's expected of load_van_hateren\n #return patches, W_X\n return patches", "def patches_to_image(patches, H, W, overlap=False):\n image = np.zeros((H, W))\n 
patch_size = int(np.sqrt(np.shape(patches)[0]))\n overlap_step = 1 if overlap else patch_size\n count = 0\n dev_mask = np.zeros_like(image)\n for i in np.arange(H - patch_size + 1, step=overlap_step):\n for j in np.arange(W - patch_size + 1, step=overlap_step):\n image[i : i + patch_size, j : j + patch_size] += np.reshape(\n patches[:, count], (patch_size, patch_size)\n )\n dev_mask[i : i + patch_size, j : j + patch_size] += 1\n count += 1\n if overlap:\n image = image / dev_mask\n return image", "def iter_patch_slices(\n image_size: Sequence[int],\n patch_size: Sequence[int] | int,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n padded: bool = True,\n) -> Generator[tuple[slice, ...], None, None]:\n\n # ensure patch_size has the right length\n patch_size_ = get_valid_patch_size(image_size, patch_size)\n\n # create slices based on start position of each patch\n for position in iter_patch_position(\n image_size=image_size, patch_size=patch_size_, start_pos=start_pos, overlap=overlap, padded=padded\n ):\n yield tuple(slice(s, s + p) for s, p in zip(position, patch_size_))", "def crop_image(X, patch_size=(8, 8)):\n (h, w) = np.shape(X)\n cropped_h = h - patch_size[0] + 1\n cropped_w = w - patch_size[1] + 1\n middle_linear_index = int(\n ((patch_size[0] / 2) * patch_size[1]) + (patch_size[1] / 2))\n columns = im2col(X, patch_size)\n return np.reshape(columns[middle_linear_index, :], [cropped_h, cropped_w])", "def get_identical_patches(imgs, patch_size):\n ih, iw = imgs[0].shape[:2]\n tp = patch_size\n ix = np.random.randint(0, iw - patch_size)\n iy = np.random.randint(0, ih - patch_size)\n imgs = []\n for i in range(len(imgs)):\n imgs.append(imgs[i][iy:iy + tp, ix:ix + tp, :])\n return imgs", "def sparse_patchify(file_name, patch_size, patch_num):\n img = imageio.imread(file_name)\n w, h, c = img.shape\n patches = np.zeros((patch_num, patch_size, patch_size, c), dtype=np.uint8)\n assert w == h\n patch_n_single = int(np.floor(np.sqrt(patch_num)))\n step = w//patch_n_single\n offset = (step - patch_size) // 2\n cnt = 0\n for x in range(0, w-patch_size, step):\n for y in range(0, h-patch_size, step):\n patches[cnt, :, :, :] = img[x+offset:x+offset+patch_size, y+offset:y+offset+patch_size]\n cnt += 1\n return patches", "def gather_patches(raw_data, grid_shape=None):\n num_examples = raw_data.shape[0]\n img_h = raw_data.shape[1]\n img_w = raw_data.shape[2]\n img_c = raw_data.shape[3]\n\n if grid_shape is None:\n grid_shape = get_grid_shape(num_examples)\n\n expected_examples = grid_shape[0] * grid_shape[1]\n padding_pattern = (((0, expected_examples\n - num_examples),)\n + ((0, 0),) * 3)\n padded_data = numpy.pad(\n raw_data,\n pad_width=padding_pattern,\n mode='constant',\n constant_values=0\n )\n\n image = padded_data.view().reshape((\n grid_shape[1], grid_shape[0] * img_h, img_w, img_c)\n ).transpose(\n (1, 0, 2, 3)\n ).reshape(\n (grid_shape[0] * img_h,\n grid_shape[1] * img_w,\n img_c)\n ).copy()\n\n image *= 0.5\n image += 0.5\n image *= 255\n\n return image", "def extract_pointwise_conv2d_patches(inputs,\n filter_shape,\n name=None,\n data_format=None):\n if inputs.shape.ndims != 4:\n raise ValueError(\"inputs must have 4 dims.\")\n if len(filter_shape) != 4:\n raise ValueError(\"filter_shape must have 4 dims.\")\n if filter_shape[0] != 1 or filter_shape[1] != 1:\n raise ValueError(\"filter_shape must have shape 1 along spatial dimensions.\")\n if not is_data_format_channel_last(data_format):\n raise ValueError(\"data_format must be channels last.\")\n with 
tf.name_scope(name, \"extract_pointwise_conv2d_patches\",\n [inputs, filter_shape]):\n ksizes = [1, 1, 1, 1] # Spatial shape is 1x1.\n strides = [1, 1, 1, 1] # Operate on all pixels.\n rates = [1, 1, 1, 1] # Dilation has no meaning with spatial shape = 1.\n padding = \"VALID\" # Doesn't matter.\n result = tf.extract_image_patches(inputs, ksizes, strides, rates, padding)\n\n batch_size, input_height, input_width, in_channels = inputs.shape.as_list()\n filter_height, filter_width, in_channels, _ = filter_shape\n return tf.reshape(result, [\n batch_size, input_height, input_width, filter_height, filter_width,\n in_channels\n ])", "def _patch_image(self, i, j):\n assert isinstance(i, int), (\"i is not an integer\")\n assert i >= 0, (\"i must be >= 0\")\n assert isinstance(j, int), (\"j is not an integer\")\n assert j >= 0, (\"j must be >= 0\")\n imin, imax = i - self.offset, i + self.offset + 1\n jmin, jmax = j - self.offset, j + self.offset + 1\n image = self.image[imin:imax, jmin:jmax, :]\n return image", "def iter_patch(\n arr: np.ndarray, patch_size, start_pos=(), copy_back: bool = True, mode: str = \"wrap\", **pad_opts,\n):\n # ensure patchSize and startPos are the right length\n patch_size = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # pad image by maximum values needed to ensure patches are taken from inside an image\n arrpad = np.pad(arr, tuple((p, p) for p in patch_size), mode, **pad_opts)\n\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, patch_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, patch_size))\n\n for slices in iter_patch_slices(iter_size, patch_size, start_pos_padded):\n yield arrpad[slices]\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(patch_size, arr.shape))\n arr[...] = arrpad[slices]", "def get_image_patches_by_sliding_window(img, stepSize, window_size, overlapping):\n # read the image and define the stepSize and window size\n # (width,height)\n if overlapping == 100:\n return None\n # generation step size for overlapping\n overlapping = 100 - overlapping\n stepSize = int(stepSize * (overlapping / 100))\n\n patches = []\n image = img # your image path\n tmp = image # for drawing a rectangle\n (w_width, w_height) = (window_size, window_size) # window size\n for x in range(0, image.shape[1] - w_width, stepSize):\n for y in range(0, image.shape[0] - w_height, stepSize):\n window = image[x:x + w_width, y:y + w_height, :]\n # add the window into your patches array.\n patches.append(window)\n\n return patches", "def get_patch_predictions(slide, model):\n dims = slide.dims\n ratio = slide.ratio\n imgArr = np.zeros((BATCH_THRESHOLD, WINDOW_SIZE[0], WINDOW_SIZE[1], 3))\n totalIndex = -1 # Total count of all patches\n imgIndex = 0 # Count of only image patches\n \n for iy, y in enumerate(np.arange(0, dims[-1][1], STEP_SIZE/ratio[1])):\n for ix, x in enumerate(np.arange(0, dims[-1][0], STEP_SIZE/ratio[0])):\n totalIndex += 1\n\n # If there are no cells in this row, add the index to the set of white patches. 
\n if iy not in slide.regionDict:\n slide.add_white_patch(totalIndex)\n continue\n\n # Else, check if the x values fall within the region boundaries.\n regions = slide.regionDict[iy]\n cont = np.any([(region[0] <= x < region[1]) for region in regions])\n if not cont: # If x doesn't fall within the boundaries, add index to white patches.\n slide.add_white_patch(totalIndex)\n continue\n \n # Extract the image patch, convert it to a numpy matrix, and normalize.\n image = slide.image.read_region((x*ratio[0] ,y*ratio[1]), 0, WINDOW_SIZE )\n imgMat = np.array(image.convert(mode=\"RGB\")) / 255. \n imgArr[imgIndex] = imgMat\n imgIndex += 1 \n\n # For every batch (specified by BATCH_THRESHOLD),\n if imgIndex%BATCH_THRESHOLD == 0:\n # Predict the classes for each image patch\n classes = model.predict(imgArr, batch_size=32, verbose=1)\n yield classes\n\n imgIndex = 0\n imgArr = np.zeros((BATCH_THRESHOLD, WINDOW_SIZE[0], WINDOW_SIZE[1], 3))\n\n if totalIndex%2500==0: slide.print_status()\n\n # Trim zeros and predict final batch if any.\n imgArr_trimmed = imgArr[:imgIndex] \n if len(imgArr_trimmed):\n classes = model.predict(imgArr_trimmed, batch_size=32, verbose=1)\n yield classes", "def sample_patches(images, npatches, patch_sz):\n\tnimages, nrows, ncols = images.shape\n\timg_index = np.random.randint(0, nimages, npatches)\n\trow_index = np.random.randint(0, nrows-patch_sz, npatches)\n\tcol_index = np.random.randint(0, ncols-patch_sz, npatches)\n\tpatches = np.empty((npatches, patch_sz, patch_sz))\n\tfor i, (img, row, col) in enumerate(zip(img_index, row_index, col_index)):\n\t\tpatches[i] = images[img, row:row+patch_sz, col:col+patch_sz]\n\treturn patches", "def img_preprocess_core(img_gray_orig):\n \n\timg_flat = img_gray_orig.reshape(img_gray_orig.shape[0] *\n\t\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\t \n\tkmeans_labels = image_segmentain(img_flat)\n\n\tkmeans_labels_arr = kmeans_labels.reshape(img_gray_orig.shape[0],\n\t\t\t\t\t\t\t\t\t img_gray_orig.shape[1])\n\n\tjust_bone, mask_img = image_mask (kmeans_labels, img_gray_orig)\n\t \n\timg_clean_background = mask_img * img_gray_orig\n\n\timg_just_bone = img_clean_background[min(just_bone[0]):\n\t\t\t\t\tmax(just_bone[0]),min(just_bone[1]):\n\t\t\t\t\tmax(just_bone[1])]\n\t\n\treturn img_just_bone", "def reconstruct_avg(img, nnf, patch_size=5):\r\n\r\n final = np.zeros_like(img)\r\n for i in range(img.shape[0]):\r\n for j in range(img.shape[1]):\r\n\r\n dx0 = dy0 = patch_size // 2\r\n dx1 = dy1 = patch_size // 2 + 1\r\n dx0 = min(j, dx0)\r\n dx1 = min(img.shape[0] - j, dx1)\r\n dy0 = min(i, dy0)\r\n dy1 = min(img.shape[1] - i, dy1)\r\n\r\n patch = nnf[i - dy0:i + dy1, j - dx0:j + dx1]\r\n\r\n lookups = np.zeros(shape=(patch.shape[0], patch.shape[1], img.shape[2]), dtype=np.float32)\r\n\r\n for ay in range(patch.shape[0]):\r\n for ax in range(patch.shape[1]):\r\n x, y = patch[ay, ax]\r\n lookups[ay, ax] = img[y, x]\r\n\r\n if lookups.size > 0:\r\n value = np.mean(lookups, axis=(0, 1))\r\n final[i, j] = value\r\n\r\n return final", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n 
dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) 
\n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources", "def slice_patches(data, wanted_height: int, wanted_width: int):\n patches = []\n for _ in range(len(data)):\n current_height = data[_].shape[0]\n current_width = data[_].shape[1]\n\n # If patches fit image perfectly, no overflow handling required\n 
if PATCHES * wanted_height == current_height and PATCHES * wanted_width == current_width:\n fitting_patches_height = PATCHES\n step_size_height = wanted_height\n\n fitting_patches_width = PATCHES\n step_size_width = wanted_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # If patches don't fit along height, y-axis\n elif PATCHES * wanted_height > current_height and PATCHES * wanted_width == current_width:\n fitting_patches_height = PATCHES - 1 # Last patch may not fit with the same step size\n overflow_height = PATCHES * wanted_height - current_height\n overlap_height = overflow_height // fitting_patches_height\n step_size_height = wanted_height - overlap_height\n\n fitting_patches_width = PATCHES\n step_size_width = wanted_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patches, which may not fit with same step size may overlap more along y axis\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width] for i\n in range(-wanted_height, 0)]\n patches.append(patch)\n\n # If patches don't fit along width, x axis\n elif PATCHES * wanted_height == current_height and PATCHES * wanted_width > current_width:\n fitting_patches_height = PATCHES\n step_size_height = wanted_height\n\n fitting_patches_width = PATCHES - 1\n overflow_width = PATCHES * wanted_width - current_width\n overlap_width = overflow_width // fitting_patches_width\n step_size_width = wanted_width - overlap_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patch which may not fit with same step size, overlaps more along x axis\n patch = [data[_][i][-wanted_width:] for i in\n range(nmr_patch_height * step_size_height,\n nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # If patches don't fit along neither height nor width\n elif PATCHES * wanted_height > current_height and PATCHES * wanted_width > current_width:\n fitting_patches_height = PATCHES - 1 # Last patch may not fit with the same step size\n overflow_height = PATCHES * wanted_height - current_height\n overlap_height = overflow_height // fitting_patches_height\n step_size_height = wanted_height - overlap_height\n\n fitting_patches_width = PATCHES - 1\n overflow_width = PATCHES * wanted_width - current_width\n overlap_width = overflow_width // fitting_patches_width\n step_size_width = wanted_width - overlap_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * 
step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patch which may not fit with same step size, overlaps more\n patch = [data[_][i][-wanted_width:] for i in\n range(nmr_patch_height * step_size_height,\n nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width] for i\n in range(-wanted_height, 0)]\n patches.append(patch)\n\n patch = [data[_][i][-wanted_width:] for i in\n range(-wanted_height, 0)] # Last patch which may not fit neither height nor width\n patches.append(patch)\n\n return np.array(patches)", "def reconstruct_image(patch_list, patch_nb=2):\n line_list = []\n for i in range(0, patch_nb ** 2 - 1, patch_nb):\n line_list.append(cv2.hconcat(patch_list[i : i + patch_nb]))\n final_img = cv2.vconcat(line_list)\n return final_img", "def _produce_individual_star_masks(self, dilationWidth=4):\n # TODO: REWRITE THIS METHOD USING THE ASTROPY SEGMENTATION METHODS???\n # Yes, I THINK so...\n\n # Grab binning\n binX, binY = self.imageList[0].binning\n\n # Compute kernel shape\n medianKernShape = (np.int(np.ceil(9.0/binX)), np.int(np.ceil(9.0/binY)))\n\n # Grab the number of images (for user updates)\n numImg = self.numberOfImages\n\n # Construct a blank array to populate with masks\n starMasks = np.zeros(self.shape, dtype=int)\n\n # Loop through the images and compute individual star masks\n for imgNum, img in enumerate(self.imageList):\n print('Building star mask for image {0:g} of {1:g}'.format(imgNum + 1, numImg), end='\\r')\n # Grab the image array\n thisData = img.data.copy()\n\n # Replace bad values with zeros\n badInds = np.where(np.logical_not(np.isfinite(thisData)))\n thisData[badInds] = -1e6\n\n # Filter the image\n medImg = ndimage.median_filter(thisData, size = medianKernShape)\n\n # get stddev of image background\n mean, median, stddev = img.sigma_clipped_stats()\n\n # Look for deviates from the filter (positive values only)\n # starMask1 = np.logical_and(np.abs(thisData - medImg) > 2.0*stddev,\n # thisData > 0)\n starMask1 = (np.abs(thisData - medImg) > 2.0*stddev)\n\n # Use the scipy ndimage opening and closing to clean the mask\n starMask1 = ndimage.binary_opening(starMask1)\n starMask1 = ndimage.binary_closing(starMask1)\n\n # Clean out some edge effects.\n starMask1[:, -4:-1] = 0\n\n #\n # NOTE: This doesn't work when there are nebulae and galaxies in the image!\n #\n # starMask1 = make_source_mask(\n # thisData,\n # snr=2,\n # npixels=5,\n # dilate_size=11,\n # mask_value=-1e6\n # )\n\n # Try using guassian kernel convolution instead\n from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel\n\n # Initalize a dilatingKernel\n gaussian_2D_kernel = Gaussian2DKernel(10.0)\n\n # Normalize the kernel\n gaussian_2D_kernel.normalize()\n\n # If the dialation kernel is larger than 10 pixels, then use FFT\n # convolution.\n starMask11 = convolve_fft(\n starMask1.astype(float),\n gaussian_2D_kernel\n )\n\n # Mask any pixels with values greater than 0.04 (which seems to\n # produce a reasonable result.)\n peakValue = 1/(200*np.pi)\n maskThreshold = 10 * peakValue * np.exp(-0.5*((dilationWidth+0.5)/10.0)**2)\n\n starMask1 = (starMask11 > maskThreshold).astype(np.int8)\n\n # TODO: delete this code if convolution works out\n #\n # # Finally, liberally EXPAND the mask with 
four dilations\n # starMask1 = ndimage.binary_dilation(\n # starMask1,\n # iterations=starMaskIters\n # ).astype(np.int8)\n\n # TODO: delete this code once I verify everything is working\n #\n # # Count the number of masked neighbors for each pixel\n # neighborCount = np.zeros(thisData.shape, dtype=int)\n # for dx in range(-1,2,1):\n # for dy in range(-1,2,1):\n # neighborCount += np.roll(np.roll(starMask1, dy, axis=0),\n # dx, axis=1).astype(np.int8)\n #\n # # Find pixels with more than two masked neighbor (including self)\n # # starMask1 = np.logical_and(starMask1, neighborCount > 2)\n # starMask1 = (neighborCount > 2).astype(np.int8)\n\n # Place the final mask into its respective slice of the 3D array\n starMasks[imgNum, :, :] = starMask1\n\n # Print a newline character to preserve star mask updates\n print('')\n\n # Once ALL of the star masks have been computed, return them to the user\n return starMasks", "def wsi_patch_splitting(wsi_path, patch_dir, patch_size=299, save_size=299,\n wsi_ext=\"tiff\", save_ext=\"png\",\n pyramid_flag=True, overlap_flag=True, level=0):\n\n if pyramid_flag == False:\n try:\n img = io.imread(wsi_path)\n if img.dtype == \"uint16\":\n img = (img / 256.0).astype(np.uint8)\n elif img.dtype == \"uint8\":\n pass\n else:\n raise Exception(\"Unknow imge data type\")\n except:\n print(\"Cannot handle {}\".format(wsi_path))\n else:\n wsi_header = openslide.OpenSlide(wsi_path)\n img = wsi_header.read_region(location=(0, 0), level=level,\n size=wsi_header.level_dimensions[level])\n img = np.asarray(img)[:,:,:-1]\n\n coors_arr = wsi_coor_splitting(wsi_h=img.shape[0], wsi_w=img.shape[1],\n length=patch_size, overlap_flag=overlap_flag)\n filename = os.path.splitext(os.path.basename(wsi_path))[0]\n for coor in coors_arr:\n h_start, w_start = coor[0], coor[1]\n cur_patch = img[h_start:h_start+patch_size, w_start:w_start+patch_size, :]\n if patch_size != save_size:\n save_patch = transform.resize(cur_patch, (save_size, save_size))\n save_patch = (save_patch * 255.0).astype(np.uint8)\n else:\n save_patch = cur_patch\n\n patch_name = \"{}_{}.{}\".format(filename, str(uuid.uuid4())[:8], save_ext)\n patch_filepath = os.path.join(patch_dir, patch_name)\n io.imsave(patch_filepath, save_patch)", "def get_patch_from_image(cls):\n path, _, x1, y1, x2, y2, height, width = get_candidate_row_from_df(cls)\n img = cv2.imread(path)\n patch = img[y1: y2, x1: x2]\n return patch", "def find_patch0(self):\n orig_image = central_area_crop(self.outp1, crop_size=(128, 192, 160))\n array_shape = np.array(orig_image.shape) # (128, 192, 160)\n patch_shape = np.array([self.patch_size] * 3) # (128)\n space = np.array([16] * 2, dtype=np.uint8) # (8)\n patch_idx_limit = (array_shape[1:] - patch_shape[1:]) // space # (4, 2)\n # construct an array, then np.argmax()\n patches_array = np.zeros(patch_idx_limit)\n for patch_idx_y in range(patch_idx_limit[0]):\n for patch_idx_x in range(patch_idx_limit[1]):\n patch_idx = np.array([patch_idx_y, patch_idx_x])\n patch_start = space * patch_idx\n patch_end = space * patch_idx + np.array(patch_shape[1:])\n cropped_array = orig_image[:, patch_start[0]:patch_end[0], patch_start[1]:patch_end[1]]\n num_tumor_voxel = (cropped_array > 0).sum()\n\n patches_array[patch_idx_y, patch_idx_x] = num_tumor_voxel\n argsmax = np.argwhere(patches_array == patches_array.max())\n patch_idx = argsmax[np.random.randint(len(argsmax))]\n # best_patch_idx = np.unravel_index(patches_array.argmax(), patches_array.shape)\n\n # convert in coords in the whole image\n orig_shape = 
np.array([155, 240, 240])\n cur_shape = np.array([128, 192, 160])\n coord_diffs = (orig_shape - cur_shape) // 2\n patch0_START_pt = np.array((0, ) + tuple(patch_idx * space)) + coord_diffs\n return patch0_START_pt", "def iter_patch_position(\n image_size: Sequence[int],\n patch_size: Sequence[int] | int | np.ndarray,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float | Sequence[int] | int = 0.0,\n padded: bool = False,\n):\n\n # ensure patchSize and startPos are the right length\n ndim = len(image_size)\n patch_size_ = get_valid_patch_size(image_size, patch_size)\n start_pos = ensure_tuple_size(start_pos, ndim)\n overlap = ensure_tuple_rep(overlap, ndim)\n\n # calculate steps, which depends on the amount of overlap\n if isinstance(overlap[0], float):\n steps = tuple(round(p * (1.0 - o)) for p, o in zip(patch_size_, overlap))\n else:\n steps = tuple(p - o for p, o in zip(patch_size_, overlap))\n\n # calculate the last starting location (depending on the padding)\n end_pos = image_size if padded else tuple(s - round(p) + 1 for s, p in zip(image_size, patch_size_))\n\n # collect the ranges to step over each dimension\n ranges = starmap(range, zip(start_pos, end_pos, steps))\n\n # choose patches by applying product to the ranges\n return product(*ranges)", "def generate_patches(scaled_imgs, constants, all_patches):\n patch_size = constants.PATCH_SIZE\n step = 1 if all_patches else 2\n patches = []\n for k, sc in enumerate(scaled_imgs):\n img_patches = []\n for i in range(0, sc.shape[0] - patch_size, step):\n for j in range(0, sc.shape[1] - patch_size, step):\n raw_patch = sc[i:i + patch_size, j:j + patch_size, :]\n patch = Patch(\n raw_patch=raw_patch,\n patch_size=patch_size,\n )\n patch.store(sc, [i, j])\n img_patches.append(patch)\n patches.append(img_patches)\n return patches", "def _get_slices(\n self,\n stride: int,\n patch_size: Tuple[int, int],\n img_size: Tuple[int, int],\n pad: int = None,\n ) -> Tuple[Dict[str, slice], int, int]:\n y_end, x_end = patch_size\n nrows, pady = self._get_margins(y_end, img_size[0], stride, pad=pad)\n ncols, padx = self._get_margins(x_end, img_size[1], stride, pad=pad)\n\n xyslices = {}\n for row in range(nrows):\n for col in range(ncols):\n y_start = row * stride\n y_end = y_start + patch_size[0]\n x_start = col * stride\n x_end = x_start + patch_size[1]\n xyslices[f\"y-{y_start}_x-{x_start}\"] = (\n slice(y_start, y_end),\n slice(x_start, x_end),\n )\n\n return xyslices, pady, padx", "def _occlude_image(im, cR, cC, size_patch, stride):\n im[cR:cR + stride, cC:cC + stride, :] = 127.5\n occ_map = np.ones((im_target_size, im_target_size))\n occ_map[cR:cR + stride, cC:cC + stride] = 0\n return im, occ_map", "def get_downsampled_patch_advanced(points, h, w, patch=[.50, 1.0, .10, 0.9], ds=[10, 10]):\n t0 = time.time()\n ys = int(patch[0] * h)\n ye = int(patch[1] * h)\n xs = int(patch[2] * w)\n xe = int(patch[3] * w)\n pc_image = points.reshape((h, w, 3))\n\n v_spacing = ds[0]\n if isinstance(v_spacing, list):\n # advanced row spacing, greater row gaps the closer to the camera (bottom of image)\n h_actual = ye - ys\n partition = h_actual // len(v_spacing) # number of partitions\n top = np.arange(0, partition, v_spacing[0])\n mid = np.arange(top[-1] + v_spacing[1], int(2 * partition), v_spacing[1])\n bottom = np.arange(mid[-1] + v_spacing[2], h_actual, v_spacing[2])\n row_indices = np.concatenate((top, mid, bottom)) + ys\n else:\n row_indices = np.arange(ys, ye, v_spacing)\n\n patch = pc_image[row_indices, xs:xe:ds[1]]\n patch = 
patch.reshape(patch.size // 3, 3)\n patch = filter_zero(patch)\n return patch", "def combine_patches_to_image(patches, target_height, target_width):\n\n counter = 0\n patch_size = patches.shape[1]\n coordinates = _get_top_left_coordinates(target_height, target_width, patch_size)\n\n if len(patches.shape) == 3: # channel dimension is missing\n patches = np.expand_dims(patches, -1)\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((target_height, target_width, patches.shape[-1] + 1))\n\n for top, left in coordinates:\n patch = combined[top:top + patch_size, left:left + patch_size, :-1]\n overlaps = combined[top:top + patch_size, left:left + patch_size, -1:]\n patch = (patch * overlaps + patches[counter]) / (overlaps + 1)\n combined[top:top + patch_size, left:left + patch_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return np.squeeze(combined[..., :-1])", "def dilationPatches(rawPatches, smallPatchThr=5, borderWidth=1): # pixel width of the border after dilation\r\n\r\n # get patch borders\r\n total_area = sm.convex_hull_image(rawPatches)\r\n patchBorder = np.multiply(-1 * (rawPatches - 1), total_area)\r\n\r\n # thinning patch borders\r\n patchBorder = sm.skeletonize(patchBorder)\r\n\r\n # thicking patch borders\r\n if borderWidth > 1:\r\n patchBorder = ni.binary_dilation(patchBorder, iterations=borderWidth - 1).astype(np.int)\r\n\r\n # genertating new patches\r\n newPatches = np.multiply(-1 * (patchBorder - 1), total_area)\r\n\r\n # removing small edges\r\n labeledPatches, patchNum = ni.label(newPatches)\r\n\r\n for i in range(1, patchNum + 1):\r\n currPatch = np.array(labeledPatches)\r\n currPatch[currPatch != i] = 0\r\n currPatch = currPatch / i\r\n\r\n if (np.sum(np.multiply(currPatch, rawPatches)[:]) == 0) or (np.sum(currPatch[:]) < smallPatchThr):\r\n # revCurrPatch = -1 * (currPatch - 1)\r\n # newPatches = np.multiply(newPatches, revCurrPatch)\r\n newPatches[currPatch == 1] = 0\r\n\r\n else:\r\n currPatch = ni.binary_closing(currPatch,\r\n structure=np.ones((borderWidth + 2, borderWidth + 2))).astype(np.int)\r\n newPatches[currPatch == 1] = 1\r\n\r\n return newPatches", "def patches(self, patch_centers, patch_size):\n assert patch_centers.shape[0] > patch_centers.shape[1]\n # x-axis corresponds to the columns of the image\n # y-axis corresponds to the rows of the image\n padding_x = int(patch_size[1]/2)\n padding_y = int(patch_size[0]/2)\n\n min_x = patch_centers[:, 0] - padding_x\n max_x = patch_centers[:, 0] + padding_x + patch_size[1] % 2\n min_y = patch_centers[:, 1] - padding_y\n max_y = patch_centers[:, 1] + padding_y + patch_size[0] % 2\n\n # Save some space by creating local copies with single letter names\n h, w = self.height, self.width\n # Get the patch_centers that are inside the image boundaries\n patches_inside_boundaries = np.logical_and(\n np.logical_and(min_x >= 0, min_y >= 0),\n np.logical_and(max_x <= w, max_y <= h)\n )\n\n # If a single patch is outside the boundaries return None to avoid\n # useless computations\n if ~np.all(patches_inside_boundaries):\n return None\n\n # Initialize the patch with 0.0\n N = patch_centers.shape[0]\n patch_shape = (N,) + patch_size + self._image.shape[2:]\n patches = np.ones(patch_shape, dtype=np.float32)\n\n idxs = np.arange(N)\n for pi in idxs[patches_inside_boundaries]:\n patches[pi] = self._image[min_y[pi]:max_y[pi], min_x[pi]:max_x[pi]]\n\n return patches", "def extract_patches_2d(\n tensor: tf.Tensor,\n 
kernel_size: Sequence[int],\n shifts: Sequence[int] = (0, 0),\n) -> tf.Tensor:\n if len(tensor.shape) == 2:\n added_batch = True\n tensor = tensor[tf.newaxis, ...]\n else:\n added_batch = False\n\n if len(tensor.shape) != 3:\n raise ValueError('tensor has wrong number of dimensions: {}'.format(tensor))\n\n paddings = paddings_for_conv2d(kernel_size, shifts)[:-1]\n padded = pad_periodic(tensor, paddings)\n\n size_x, size_y = kernel_size\n extracted = tf.extract_image_patches(padded[..., tf.newaxis],\n [1, size_x, size_y, 1],\n strides=[1, 1, 1, 1],\n rates=[1, 1, 1, 1],\n padding='VALID')\n\n if added_batch:\n result = tf.squeeze(extracted, axis=0)\n else:\n result = extracted\n\n return result", "def blending_example1():\n pic_desert = read_image(relpath(\"./externals/pic_desert.jpg\"), 2)\n pic_pool = read_image(relpath(\"./externals/pic_swim.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_desert.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n print(pic_desert.shape[2])\n [R1, G1, B1] = np.dsplit(pic_desert, pic_desert.shape[2])\n [R2, G2, B2] = np.dsplit(pic_pool, pic_pool.shape[2])\n R1 = np.reshape(R1, (512,1024))\n R2 = np.reshape(R2, (512,1024))\n G1 = np.reshape(G1, (512,1024))\n G2 = np.reshape(G2, (512,1024))\n B1 = np.reshape(B1, (512,1024))\n B2 = np.reshape(B2, (512,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_desert)\n ax2.imshow(pic_pool)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_desert, pic_pool, mask, new_pic", "def imageRGB_as_strided(img, kernel_size=224, stride=32):\n for ch in range(3):\n channel = img[:,:,ch]\n new_channel, x0, y0 = image2d_as_strided(channel, kernel_size=kernel_size, stride=stride)\n if ch == 0:\n new_img = np.zeros(new_channel.shape + (3,), dtype=np.uint8)\n new_img[:, :, :, :, 0] = new_channel\n else:\n new_img[:, :, :, :, ch] = new_channel\n\n return new_img, x0, y0", "def patches_to_image(patch_centers: np.ndarray,\n patch_lt: lt.LabeledTensor,\n name: Optional[str] = None) -> lt.LabeledTensor:\n with tf.compat.v1.name_scope(name, \"patches_to_image\", [patch_lt]) as scope:\n patch_lt = lt.transpose(patch_lt, [\"batch\", \"row\", \"column\", \"channel\"])\n\n num_extracted_rows = len(set([l[0] for l in patch_centers]))\n num_extracted_columns = len(set([l[1] for l in patch_centers]))\n assert num_extracted_rows * num_extracted_columns == patch_centers.shape[0]\n\n batch_size = len(patch_lt.axes[\"batch\"])\n assert batch_size % len(patch_centers) == 0\n output_batch_size = batch_size // len(patch_centers)\n\n # TODO(ericmc): This will fail if the stride is not homogeneous.\n if patch_centers.shape[0] == 1:\n stride = 0\n else:\n [row_0, column_0] = patch_centers[0]\n [row_1, column_1] = patch_centers[1]\n if row_0 == row_1:\n stride = column_1 - column_0\n else:\n stride = row_1 - row_0\n assert stride > 0\n assert abs(round(stride) - stride) < 0.0001\n stride = 
int(round(stride))\n\n patch_lt = lt.reshape(patch_lt, [\"batch\"],\n [(\"batch\", output_batch_size),\n (\"patch_row\", num_extracted_rows),\n (\"patch_column\", num_extracted_columns)])\n tf.compat.v1.logging.info(\"%r\", patch_lt.axes)\n\n row_lts = []\n for r in range(num_extracted_rows):\n this_row = []\n for c in range(num_extracted_columns):\n this_row.append(patch_lt[:, r, c, :, :, :])\n row_lts.append(lt.concat(this_row, \"column\"))\n stitched = lt.concat(row_lts, \"row\")\n\n return lt.identity(stitched, name=scope)", "def patch(self, patch_center, patch_size, expand_patch=True):\n # x-axis corresponds to the columns of the image\n # y-axis corresponds to the rows of the image\n padding_x = int(patch_size[1]/2)\n padding_y = int(patch_size[0]/2)\n\n min_x = patch_center[0, 0] - padding_x\n max_x = patch_center[0, 0] + padding_x + patch_size[1] % 2\n min_y = patch_center[1, 0] - padding_y\n max_y = patch_center[1, 0] + padding_y + patch_size[0] % 2\n\n # Initialize the patch with 0.0\n patch = np.zeros(patch_size + self._image.shape[2:], dtype=np.float32)\n\n # Save some space by creating local copies with single letter names\n h, w = self.height, self.width\n\n # If the patch is inside the image boundaries return it as it is\n if min_x >= 0 and min_y >= 0 and max_x <= w and max_y <= h:\n patch[:, :] = self._image[min_y:max_y, min_x:max_x]\n\n # otherwise copy part (or nothing) from the image into the empty patch\n elif expand_patch:\n p_min_x = min(w, max(0, min_x))\n p_max_x = max(0, min(w, max_x))\n p_min_y = min(h, max(0, min_y))\n p_max_y = max(0, min(h, max_y))\n\n s_min_x = min(patch_size[1], max(0, 0 - min_x))\n s_max_x = max(0, min(patch_size[1], patch_size[1] + w - max_x))\n s_min_y = min(patch_size[0], max(0, 0 - min_y))\n s_max_y = max(0, min(patch_size[0], patch_size[0] + h - max_y))\n\n patch[s_min_y:s_max_y, s_min_x:s_max_x] = \\\n self._image[p_min_y:p_max_y, p_min_x:p_max_x]\n else:\n patch.fill(-1.)\n\n return patch", "def extractImage(self, data, offset=0.0, subpixel=False):\n offset = self.imageOffset + offset * self.sampleRate / self.downsample\n intOffset = int(np.floor(offset))\n fracOffset = offset - intOffset\n\n shape = self.imageShape\n stride = self.imageStride\n\n if subpixel and fracOffset != 0:\n print(fracOffset)\n interp = data[:-1] * (1.0 - fracOffset) + data[1:] * fracOffset\n image = pg.subArray(interp, intOffset, shape, stride) \n else:\n image = pg.subArray(data, intOffset, shape, stride)\n\n if self.bidirectional:\n image = image.copy()\n image[:, 1::2] = image[:, 1::2, ::-1]\n\n return image", "def get_test_pattern(img_size=(2048, 2048)):\n ny, nx = img_size\n # mask = np.zeros((ny, nx))\n\n # patterns with variable spacing\n periods = range(2, 20, 2)\n # vcounter = 0\n for ii, p in enumerate(periods):\n cell = np.zeros((p, nx))\n on_pix = int(np.ceil(p / 2))\n cell[:on_pix, :] = 1\n cell = np.tile(cell, [4, 1])\n\n if ii == 0:\n mask = cell\n else:\n mask = np.concatenate((mask, cell), axis=0)\n\n mask = mask[:, :mask.shape[0]]\n\n mask_block = np.concatenate((mask, np.rot90(mask)), axis=1)\n mask_block2 = np.concatenate((np.rot90(mask), mask), axis=1)\n\n mask_superblock = np.concatenate((mask_block, mask_block2))\n\n ny_reps = int(np.ceil(ny / mask_superblock.shape[0]))\n nx_reps = int(np.ceil(nx / mask_superblock.shape[1]))\n mask = np.tile(mask_superblock, [ny_reps, nx_reps])\n mask = mask[0:ny, 0:nx]\n\n return mask", "def extract_microfossils(grayscale_image, min_microfossil_pixel_size, crop_dims, remove_side_particles):\n # Blurring 
the image helps with getting a more consistent binary image\n blurred_image = cv2.bilateralFilter(grayscale_image, d=0, sigmaColor=40, sigmaSpace=2)\n binary_image = get_binary_image(blurred_image)\n marked = find_connected_components(binary_image)\n coords, all_coords = get_image_objects(marked, min_microfossil_pixel_size)\n M, N = grayscale_image.shape\n\n # Computing the void intensity around the connected components\n average_void_intensity = compute_average_void_intensity(grayscale_image, marked, all_coords)\n\n # Getting the crops\n filtered_crops, unfiltered_crops = [], []\n for cc_id in coords:\n obj_row, obj_col = coords[cc_id]\n from_x = int(obj_col - crop_dims[1] / 2)\n from_y = int(obj_row - crop_dims[0] / 2)\n valid_y = from_y >= 0 and from_y + crop_dims[0] < M\n valid_x = from_x >= 0 and from_x + crop_dims[1] < N\n if valid_x and valid_y:\n crop_img = grayscale_image[from_y:from_y+crop_dims[0], from_x:from_x+crop_dims[1]]\n unfiltered_crops.append(crop_img)\n if remove_side_particles:\n crop_cc = marked[from_y:from_y+crop_dims[0], from_x:from_x+crop_dims[1]]\n filtered_crop = remove_side_objects(crop_img, crop_cc, cc_id, average_void_intensity)\n filtered_crops.append(filtered_crop)\n\n return unfiltered_crops, filtered_crops", "def add_gt_pixels(self, original_blob, blob, patch_center, patch_size):\n # Case 1: crop boundaries is intersecting with data\n nonzero_idx = np.array(np.where(blob['data'][0, ..., 0] > 0.0)).T # N x 3\n border_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx == 0, nonzero_idx == self.cfg.IMAGE_SIZE - 1), axis=1)]\n\n # Case 2: crop is partially outside of original data (thus padded)\n # if patch_center is within patch_size of boundaries of original blob\n # boundary intesecting with data\n padded_idx = nonzero_idx[np.any(np.logical_or(nonzero_idx + patch_center - patch_size / 2.0 >= self.cfg.IMAGE_SIZE - 2, nonzero_idx + patch_center - patch_size / 2.0 <= 1), axis=1)]\n # dbscan on all found voxels from case 1 and 2\n coords = np.concatenate([border_idx, padded_idx], axis=0)\n artificial_gt_pixels = []\n if coords.shape[0]:\n db = DBSCAN(eps=10, min_samples=3).fit_predict(coords)\n for v in np.unique(db):\n cluster = coords[db == v]\n artificial_gt_pixels.append(cluster[np.argmax(blob['data'][0, ..., 0][cluster.T[0], cluster.T[1], cluster.T[2]]), :])\n\n artificial_gt_pixels = np.concatenate([artificial_gt_pixels, np.ones((len(artificial_gt_pixels), 1))], axis=1)\n\n return np.array(artificial_gt_pixels)", "def getPatches(slide, mask, numPatches=0, dims=(0,0), dirPath='', slideNum='', plot=False, plotMask=False):\n # extractPatchByXMLLabeling \n w,h = dims \n levelDims = slide.level_dimensions\n Xratio, Yratio = calculateRatio(levelDims)\n\n i = 0\n while i < numPatches:\n firstLoop = True # Boolean to ensure while loop runs at least once. \n\n while firstLoop: # or not mask[rr,cc].all(): # True if it is the first loop or if all pixels are in the mask \n firstLoop = False\n x, y = chooseRandPixel(mask) # Get random top left pixel of patch. 
\n xVertices = np.array([x, x+(w/Xratio), x+(w/Xratio), x, x])\n yVertices = np.array([y, y, y-(h/Yratio), y-(h/Yratio), y])\n rr, cc = polygon(xVertices, yVertices)\n\n image = slide.read_region((x*Xratio, y*Yratio), 0, (w,h))\n \n isWhite = checkWhiteSlide(image)\n newPath = 'other' if isWhite else dirPath\n if not isWhite: i += 1\n\n slideName = '_'.join([slideNum, 'x'.join([str(x*Xratio),str(y*Yratio)])])\n image.save(os.path.join(newPath, slideName+\".png\"))\n\n if plot: \n plotImage(image)\n if plotMask: mask[rr,cc] = 0\n\n if plotMask:\n plotImage(mask)", "def img_crop(im, w, h):\n list_patches = []\n imgwidth = im.shape[0]\n imgheight = im.shape[1]\n is_2d = len(im.shape) < 3\n for i in range(0,imgheight,h):\n for j in range(0,imgwidth,w):\n if is_2d:\n im_patch = im[j:j+w, i:i+h]\n else:\n im_patch = im[j:j+w, i:i+h, :]\n list_patches.append(im_patch)\n return list_patches", "def extract_convolution_patches(inputs,\n filter_shape,\n padding,\n strides=None,\n dilation_rate=None,\n name=None,\n data_format=None):\n if not is_data_format_channel_last(data_format):\n raise ValueError(\"Channel must be last dimension.\")\n with tf.name_scope(name, \"extract_convolution_patches\",\n [inputs, filter_shape, padding, strides, dilation_rate]):\n batch_size = inputs.shape.as_list()[0]\n in_channels = inputs.shape.as_list()[-1]\n\n # filter_shape = spatial_filter_shape + [in_channels, out_channels]\n spatial_filter_shape = filter_shape[:-2]\n if in_channels != filter_shape[-2]:\n raise ValueError(\"inputs and filter_shape must agree on in_channels.\")\n\n # Map each input feature to a location in the output.\n out_channels = np.prod(spatial_filter_shape) * in_channels\n filters = tf.eye(out_channels, dtype=inputs.dtype)\n filters = tf.reshape(\n filters,\n list(spatial_filter_shape) + [in_channels, out_channels])\n\n if strides is not None and len(strides) == len(inputs.shape):\n strides = strides[1:-1] # remove batch and channel dimension\n\n if dilation_rate is not None and len(dilation_rate) == len(inputs.shape):\n dilation_rate = dilation_rate[1:-1] # remove batch and channel dimension\n\n result = tf.nn.convolution(\n inputs,\n filters,\n padding=padding,\n strides=strides,\n dilation_rate=dilation_rate)\n spatial_output_shape = result.shape.as_list()[1:-1]\n result = tf.reshape(result, [batch_size or -1] + spatial_output_shape +\n list(spatial_filter_shape) + [in_channels])\n\n return result", "def get_downsampled_patch(points, h, w, patch=[.75, 1.0, .10, 0.9], ds=[10, 10]):\n t0 = time.time()\n pc_image = points.reshape((h, w, 3))\n ys = int(patch[0] * h)\n ye = int(patch[1] * h)\n xs = int(patch[2] * w)\n xe = int(patch[3] * w)\n patch = pc_image[ys:ye:ds[0], xs:xe:ds[1]]\n patch = patch.reshape(patch.size // 3, 3)\n patch = filter_zero(patch)\n\n # print(f\"Downampled Patch: {(time.time() - t0) * 1000:.1f} ms\")\n\n return patch", "def insert_patch_subpixel(img, patch, p):\n ths = patch.shape[0] / 2\n xpmin = p[0] - ths\n ypmin = p[1] - ths\n Ho = np.array([[1, 0, xpmin],\n [0, 1, ypmin],\n [0, 0, 1]], dtype=float)\n\n w = img.shape[0]\n h = img.shape[1]\n img2 = cv2.warpPerspective(patch, Ho, (h, w), dst=img,\n flags=cv2.INTER_LINEAR,\n borderMode=cv2.BORDER_TRANSPARENT)\n return img2", "def create_all_mask(mask, num, stride):\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)", "def mask_to_objects_2d(mask, 
background=0, offset=None):\n if mask.ndim != 2:\n raise ValueError(\"Cannot handle image with ndim different from 2 ({} dim. given).\".format(mask.ndim))\n if offset is None:\n offset = (0, 0)\n # opencv only supports contour extraction for binary masks: clean mask and binarize\n mask_cpy = np.zeros(mask.shape, dtype=np.uint8)\n mask_cpy[mask != background] = 255\n # create artificial separation between adjacent touching each other + clean\n contours = dilation(mask, square(3)) - mask\n mask_cpy[np.logical_and(contours > 0, mask > 0)] = background\n mask_cpy = clean_mask(mask_cpy, background=background)\n # extract polygons and labels\n polygons = _locate(mask_cpy, offset=offset)\n objects = list()\n for polygon in polygons:\n # loop for handling multipart geometries\n for curr in flatten_geoms(polygon.geoms) if hasattr(polygon, \"geoms\") else [polygon]:\n x, y = get_polygon_inner_point(curr)\n objects.append(AnnotationSlice(polygon=curr, label=mask[y - offset[1], x - offset[0]]))\n return objects", "def _find_masks(batch, min_size=10):\n result = []\n for b in batch:\n assert b.shape[0] == 1\n patch = b[0]\n z_sum = patch.sum(axis=(1, 2))\n coords = np.where(z_sum > min_size)[0]\n if len(coords) > 0:\n ind = coords[len(coords) // 2]\n result.append(b[:, ind:ind + 1, ...])\n else:\n ind = b.shape[1] // 2\n result.append(b[:, ind:ind + 1, ...])\n\n return np.stack(result, axis=0)", "def flood_fill_edges(img, stride):\n\n black = 0\n white = 255\n (rows, cols) = img.shape\n msk = np.zeros((rows+2, cols+2, 1), np.uint8)\n\n # Left and right edges\n i = 0\n while i < rows:\n if img[i, 0] == white:\n cv2.floodFill(img, msk, (0, i), black)\n if img[i, cols-1] == white:\n cv2.floodFill(img, msk, (cols-1, i), black)\n i += stride\n\n # Top and bottom edges\n i = 0\n while i < cols:\n if img[0, i] == white:\n cv2.floodFill(img, msk, (i, 0), black)\n if img[rows-1, i] == white:\n cv2.floodFill(img, msk, (i, rows-1), black)\n i += stride", "def extract_patches_tumor(self, bounding_boxes):\n mag_factor = pow(2, self.level_used)\n\n print('No. 
of ROIs to extract patches from: %d' % len(bounding_boxes))\n\n for i, bounding_box in enumerate(bounding_boxes):\n b_x_start = int(bounding_box[0]) * mag_factor\n b_y_start = int(bounding_box[1]) * mag_factor\n b_x_end = (int(bounding_box[0]) + int(bounding_box[2])) * mag_factor\n b_y_end = (int(bounding_box[1]) + int(bounding_box[3])) * mag_factor\n# X = np.random.random_integers(b_x_start, high=b_x_end, size=500)\n# Y = np.random.random_integers(b_y_start, high=b_y_end, size=500)\n # X = np.arange(b_x_start, b_x_end-256, 5)\n # Y = np.arange(b_y_start, b_y_end-256, 5)\n\n for x in range(b_x_start,b_x_end,PATCH_SIZE):\n for y in range(b_y_start,b_y_end,PATCH_SIZE):\n patch = self.wsi_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))\n mask = self.mask_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))\n mask_gt = np.array(mask)\n # mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)\n mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)\n patch_array = np.array(patch)\n \n white_pixel_cnt_gt = cv2.countNonZero(mask_gt)\n \n if white_pixel_cnt_gt == 0: # mask_gt does not contain tumor area\n patch_hsv = cv2.cvtColor(patch_array, cv2.COLOR_BGR2HSV)\n lower_red = np.array([20, 20, 20])\n upper_red = np.array([200, 200, 200])\n mask_patch = cv2.inRange(patch_hsv, lower_red, upper_red)\n white_pixel_cnt = cv2.countNonZero(mask_patch)\n \n if white_pixel_cnt > ((PATCH_SIZE * PATCH_SIZE) * 0.50):\n # mask = Image.fromarray(mask)\n patch.save(PROCESSED_PATCHES_TUMOR_NEGATIVE_PATH + PATCH_NORMAL_PREFIX+'_'+str(x)+'_'+str(y)+'.jpg', 'JPEG')\n # mask.save(PROCESSED_PATCHES_NORMAL_PATH + PATCH_NORMAL_PREFIX + str(self.patch_index),\n # 'PNG')\n self.negative_patch_index += 1\n else: # mask_gt contains tumor area\n if white_pixel_cnt_gt >= ((PATCH_SIZE * PATCH_SIZE) * 0.85):\n patch.save(PROCESSED_PATCHES_POSITIVE_PATH + PATCH_TUMOR_PREFIX +'_'+str(x)+'_'+str(y)+'.jpg', 'JPEG')\n self.positive_patch_index += 1\n \n patch.close()\n mask.close()", "def get_patches(points, h, w, patches=[[450, 100, 10, 10], [450, 500, 10, 10]], min_valid_percent=0.75):\n pc_image = points.reshape((h, w, 3))\n pc_patches = []\n for patch in patches:\n possible_patch = pc_image[patch[0]:patch[0] + patch[2], patch[1]:patch[1] + patch[3]]\n possible_patch = possible_patch.reshape(possible_patch.size // 3, 3)\n possible_patch = filter_zero(possible_patch)\n if possible_patch.shape[0] > min_valid_percent * (patch[2] * patch[3]):\n pc_patches.append(possible_patch)\n return pc_patches", "def get_output_from_patches_with_mean(patches_list, output_shape):\n\n if output_shape[0] == training_size:\n nb_matrix_by_row = 2\n step_length = (training_size - img_patch_size)\n\n else:\n nb_matrix_by_row = 3\n step_length = (test_size - img_patch_size) // 2\n\n nb_elem_by_patch = nb_matrix_by_row ** 2\n images = []\n for number in range(patches_list.shape[0] // nb_elem_by_patch):\n reconstructed_images = np.zeros(output_shape)\n nb_elem_images = np.zeros(output_shape)\n for i in range(nb_matrix_by_row):\n for j in range(nb_matrix_by_row):\n reconstructed_images[i * step_length: i * step_length + img_patch_size,\n j * step_length: j * step_length + img_patch_size] += patches_list[\n number * nb_elem_by_patch + i * nb_matrix_by_row + j]\n nb_elem_images[i * step_length: i * step_length + img_patch_size,\n j * step_length: j * step_length + img_patch_size] += 1\n reconstructed_images = np.divide(reconstructed_images, nb_elem_images)\n images.extend([reconstructed_images])\n\n return images", "def filterBankPatch(img, width=5):\n half 
= width / 2 # e.g. for 5, it's 2\n imgE = Views.extendBorder(img)\n ops = [offset(imgE, [x, y]) for x in xrange(-half, half + 1) for y in xrange(-half, half + 1)]\n return ops", "def reconstruct_from_patches(patches, indices, array_shape, default_value=0, average=True):\n if not _is_sequence_of_integers(array_shape):\n raise TypeError(\"'array_shape' should be a sequence of integer values\")\n\n array = np.ones(array_shape) * default_value\n counter = np.zeros(array_shape, dtype=np.uint32)\n\n patch_start = np.zeros_like(array_shape)\n patch_stop = np.zeros_like(array_shape)\n\n array_start = np.zeros_like(array_shape)\n array_stop = np.zeros_like(array_shape)\n\n array_shape = np.array(array_shape)\n for patch, index in zip(patches, indices):\n start = np.array(index)\n stop = np.array([i + s for i, s in zip(start, patch.shape)])\n\n # Skip the patch located outside the array\n if np.any(stop < 0) or np.any(start > array_shape):\n continue\n \n # Crop the patch if it is partly outside the array\n patch_start.fill(0)\n patch_stop[:] = patch.shape\n patch_start[start < 0] = np.abs(start[start < 0])\n patch_stop[stop > array_shape] -= (stop - array_shape)[stop > array_shape]\n\n array_start.fill(0)\n array_stop[:] = array_shape\n array_start[start > 0] = start[start > 0]\n array_stop[stop < array_shape] = stop[stop < array_shape]\n\n # Make ROIs\n patch_roi = tuple(slice(b, e) for b, e in zip(patch_start, patch_stop))\n array_roi = tuple(slice(b, e) for b, e in zip(array_start, array_stop))\n\n # Update array\n if average:\n array[array_roi] = array[array_roi] * (counter[array_roi] > 0) + patch[patch_roi]\n counter[array_roi] += 1\n else:\n array[array_roi] = patch[patch_roi]\n \n # Average values if required\n if average:\n array = array / counter\n\n return array", "def patchGenerator(gen, patch_size=128, patch_batch_size=1):\n \n for imgs, masks in gen: # For each batch\n img_list = []\n mask_list = []\n for i in range(0, imgs.shape[0]): # For each image in a batch\n patch_x = patchify(imgs[i], (patch_size, patch_size, imgs[i].shape[-1]), step=patch_size) # split image into 4*4 small 128*128 patches.\n img_p = patch_x.reshape(-1, *patch_x.shape[-3:])\n img_list.append(img_p)\n\n mask_y = patchify(masks[i], (patch_size, patch_size, 1), step=patch_size) # split mask into 4*4 small 128*128 patches.\n mask_p = mask_y.reshape(-1, *mask_y.shape[-3:])\n mask_list.append(mask_p)\n \n if (patch_batch_size == 1):\n for j in range(0, img_p.shape[0]): # For each patch in a image\n yield img_p[j][np.newaxis, :], mask_p[j][np.newaxis, :]\n \n if (patch_batch_size > 1):\n image_patches = np.concatenate(img_list)\n mask_patches = np.concatenate(mask_list)\n patch_batch_counter = 0\n for idx in range(0, patch_batch_size):\n image_patch_batch = image_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n mask_patch_batch = mask_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n shuffled_images, shuffled_masks = randomize(image_patch_batch, mask_patch_batch)\n yield shuffled_images, shuffled_masks", "def subtractor(img, dilsize: int = 15, blursize: int = 59, kernelshape: str = \"ellipse\",\n bluriter: int = 1, fix_blursize: bool = False, blurfilter: str = \"Gaussian\",\n textdilation: bool = True, contrast: bool = False, verbose: bool = False):\n rgb_planes = cv2.split(img)\n result_planes = []\n\n # Only odd blurkernelsize are valid\n blursize = blursize + 1 if blursize % 2 == 0 else blursize\n\n for idx, plane in enumerate(rgb_planes[:3]):\n dilated_img = plane\n kshape = 
{\"rect\": cv2.MORPH_RECT, \"ellipse\": cv2.MORPH_ELLIPSE, \"cross\": cv2.MORPH_CROSS}.get(kernelshape,\n cv2.MORPH_ELLIPSE)\n # Reduce influence of the text by dilation (round kernel produce atm the best results)\n if textdilation:\n dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2), dilsize))\n dilated_img = cv2.dilate(plane, dil_kernel, iterations=3)\n dil_kernel = cv2.getStructuringElement(kshape, (int(dilsize / 2) + 1, dilsize + 1))\n dilated_img = cv2.erode(dilated_img, dil_kernel, iterations=1)\n else:\n dil_kernel = cv2.getStructuringElement(kshape, (dilsize, dilsize))\n dilated_img = cv2.dilate(dilated_img, dil_kernel)\n\n bg_img = dilated_img\n for ksize in np.linspace(blursize, 1, num=bluriter):\n if not fix_blursize:\n if blurfilter == \"Gaussian\":\n bg_img = cv2.GaussianBlur(bg_img,\n (int(ksize) + (1 + int(ksize) % 2), int(ksize) + (1 + int(ksize) % 2)), 0)\n else:\n bg_img = cv2.medianBlur(bg_img, (int(ksize) + (1 + int(ksize) % 2)))\n else:\n if blurfilter == \"Gaussian\":\n bg_img = cv2.GaussianBlur(bg_img, (blursize, blursize), 0)\n else:\n bg_img = cv2.medianBlur(bg_img, blursize)\n\n if verbose:\n cv2.imwrite(f\"Filtered_{idx}.jpg\", bg_img)\n cv2.imwrite(f\"Dilate_{idx}.jpg\", dilated_img)\n\n # Subtract bg from fg\n diff_img = 255 - cv2.absdiff(plane, bg_img)\n norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n\n # Increases the contrast\n if contrast:\n diff_img = cv2.add(norm_img, plane * contrast, dtype=cv2.CV_8U)\n # Normalize the final image to the range 0-255\n norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n\n result_planes.append(norm_img)\n\n return cv2.merge(result_planes)" ]
[ "0.69615763", "0.6910621", "0.6902872", "0.6756077", "0.6684931", "0.6669656", "0.66367173", "0.66181797", "0.6613878", "0.6597331", "0.65837264", "0.65633184", "0.65623057", "0.652115", "0.6456871", "0.6398756", "0.6369198", "0.6359843", "0.63577175", "0.6351932", "0.6345157", "0.63389385", "0.6328715", "0.6301238", "0.628669", "0.6277554", "0.6260962", "0.62547845", "0.6247685", "0.62453574", "0.6242009", "0.6223413", "0.62233883", "0.6149656", "0.60847616", "0.6069595", "0.60562515", "0.60279626", "0.5985102", "0.5957666", "0.5944085", "0.591692", "0.59158343", "0.5890937", "0.5873911", "0.58720315", "0.5868223", "0.58375", "0.58359796", "0.5832957", "0.5817977", "0.58150464", "0.58127636", "0.58016616", "0.5795392", "0.5785702", "0.5778222", "0.5742143", "0.57234436", "0.5702023", "0.5697698", "0.5684467", "0.5683492", "0.5683478", "0.56826943", "0.56691784", "0.56665224", "0.5664737", "0.5627607", "0.56275886", "0.56103134", "0.55949503", "0.5581512", "0.5581025", "0.5561894", "0.5553899", "0.5548419", "0.55261856", "0.5518491", "0.55171245", "0.5516242", "0.5513092", "0.55076545", "0.5501328", "0.5474285", "0.54680246", "0.544683", "0.54399425", "0.5427696", "0.5418534", "0.5415955", "0.54102635", "0.53960985", "0.53894913", "0.5381706", "0.53793114", "0.5374157", "0.53642094", "0.5356354", "0.5350908" ]
0.7973168
0
Rebuild an image from a set of patches by averaging. The reconstructed image will have different dimensions than the original image if the strides and offsets of the patches were changed from the defaults!
def reconstruct_from_grayscale_patches( patches, origin, epsilon=1e-12 ):
    patch_width  = patches.shape[2]
    patch_height = patches.shape[1]
    img_width  = np.max( origin[1] ) + patch_width
    img_height = np.max( origin[0] ) + patch_height

    out = np.zeros( (img_height,img_width) )
    wgt = np.zeros( (img_height,img_width) )
    for i in range(patch_height):
        for j in range(patch_width):
            out[origin[0]+i,origin[1]+j] += patches[:,i,j]
            wgt[origin[0]+i,origin[1]+j] += 1.0

    return out/np.maximum( wgt, epsilon ), wgt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reconstruct_avg(img, nnf, patch_size=5):\r\n\r\n final = np.zeros_like(img)\r\n for i in range(img.shape[0]):\r\n for j in range(img.shape[1]):\r\n\r\n dx0 = dy0 = patch_size // 2\r\n dx1 = dy1 = patch_size // 2 + 1\r\n dx0 = min(j, dx0)\r\n dx1 = min(img.shape[0] - j, dx1)\r\n dy0 = min(i, dy0)\r\n dy1 = min(img.shape[1] - i, dy1)\r\n\r\n patch = nnf[i - dy0:i + dy1, j - dx0:j + dx1]\r\n\r\n lookups = np.zeros(shape=(patch.shape[0], patch.shape[1], img.shape[2]), dtype=np.float32)\r\n\r\n for ay in range(patch.shape[0]):\r\n for ax in range(patch.shape[1]):\r\n x, y = patch[ay, ax]\r\n lookups[ay, ax] = img[y, x]\r\n\r\n if lookups.size > 0:\r\n value = np.mean(lookups, axis=(0, 1))\r\n final[i, j] = value\r\n\r\n return final", "def recreate_from_patches(data):\n overlap_height = (PATCHES * PATCH_HEIGHT - IMG_HEIGHT) // (PATCHES - 1) # Overlap of patches along y axis\n step_size_height = PATCH_HEIGHT - overlap_height # Step size along y axis\n\n overlap_width = (PATCHES * PATCH_WIDTH - IMG_WIDTH) // (PATCHES - 1) # Overlap of patches along x axis\n step_size_width = PATCH_WIDTH - overlap_width # Step size along x axis\n\n whole_images = []\n i = 0\n while i < len(data):\n image = np.zeros((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)) # Create an empty image to pin patches on\n\n for h in range(PATCHES - 1):\n for w in range(PATCHES - 1):\n # Insert patches into image starting from top left corner, without the patches touching right or bottom border\n if h > 0: # First row has no overlap with patches above them\n if overlap_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_height], 0.5,\n data[i - PATCHES][step_size_height:], 0.5, 0)\n\n # Insert into patch where it overlaps\n rest = data[i][overlap_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n\n if w == PATCHES - 2: # If we are at the second to last patch, overlap may be calculated different\n i += 1\n continue\n\n else:\n i += 1\n if overlap_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_width)]], 0.5,\n data[i - 1][:,\n [i for i in range(PATCH_WIDTH - overlap_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert into next patch\n rest = data[i][:, [i for i in range(overlap_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch which touches right border on this height, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert array of overlap into patch, where it overlaps\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n i += 1\n\n for w in range(PATCHES - 1):\n # Insert patches from the bottom border, may overlap more\n 
overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n\n # Insert patch in the bottom right corner, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap along x axis with mean values form overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n\n # Insert array of overlap into patch\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2, IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n whole_images.append(\n image) # All corresponding patches are pinned inside the image, therefore this image is finished\n\n return whole_images", "def patches_to_img(patches, stride, img_shape):\r\n if len(img_shape) > 2:\r\n channels = [patches_to_img(patches[:, :, :, i], stride, img_shape[:2]) for i in range(3)]\r\n return np.concatenate(channels, axis=2)\r\n\r\n h, w = img_shape\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n\r\n assert h == w, \"only squared image are accepted\"\r\n assert (h - patch_size) % stride == 0, \"The stride must be adapted on image and patch size\"\r\n assert len(patches) == n_stride ** 2, \"They must be the right number of patches per image\"\r\n\r\n pred_final = np.zeros(img_shape + (1,)) # Accumulator for the final prediction\r\n pred_normalizer = np.zeros(img_shape + (1,)) # Counter of the patch per prediction per pixel\r\n\r\n for i in range(n_stride):\r\n for j in range(n_stride):\r\n x_from, x_to = i * stride, i * stride + patch_size\r\n y_from, y_to = j * stride, j * stride + patch_size\r\n idx = i * n_stride + j\r\n pred_final[x_from: x_to, y_from: y_to] += patches[idx].reshape(patch_size, patch_size, 1)\r\n pred_normalizer[x_from: x_to, y_from: y_to] += 1\r\n return pred_final / pred_normalizer", "def reconstruct_from_patches(patches, indices, array_shape, default_value=0, average=True):\n if not _is_sequence_of_integers(array_shape):\n raise TypeError(\"'array_shape' should be a sequence of integer values\")\n\n array = np.ones(array_shape) * default_value\n counter = np.zeros(array_shape, 
dtype=np.uint32)\n\n patch_start = np.zeros_like(array_shape)\n patch_stop = np.zeros_like(array_shape)\n\n array_start = np.zeros_like(array_shape)\n array_stop = np.zeros_like(array_shape)\n\n array_shape = np.array(array_shape)\n for patch, index in zip(patches, indices):\n start = np.array(index)\n stop = np.array([i + s for i, s in zip(start, patch.shape)])\n\n # Skip the patch located outside the array\n if np.any(stop < 0) or np.any(start > array_shape):\n continue\n \n # Crop the patch if it is partly outside the array\n patch_start.fill(0)\n patch_stop[:] = patch.shape\n patch_start[start < 0] = np.abs(start[start < 0])\n patch_stop[stop > array_shape] -= (stop - array_shape)[stop > array_shape]\n\n array_start.fill(0)\n array_stop[:] = array_shape\n array_start[start > 0] = start[start > 0]\n array_stop[stop < array_shape] = stop[stop < array_shape]\n\n # Make ROIs\n patch_roi = tuple(slice(b, e) for b, e in zip(patch_start, patch_stop))\n array_roi = tuple(slice(b, e) for b, e in zip(array_start, array_stop))\n\n # Update array\n if average:\n array[array_roi] = array[array_roi] * (counter[array_roi] > 0) + patch[patch_roi]\n counter[array_roi] += 1\n else:\n array[array_roi] = patch[patch_roi]\n \n # Average values if required\n if average:\n array = array / counter\n\n return array", "def get_output_from_patches_with_mean(patches_list, output_shape):\n\n if output_shape[0] == training_size:\n nb_matrix_by_row = 2\n step_length = (training_size - img_patch_size)\n\n else:\n nb_matrix_by_row = 3\n step_length = (test_size - img_patch_size) // 2\n\n nb_elem_by_patch = nb_matrix_by_row ** 2\n images = []\n for number in range(patches_list.shape[0] // nb_elem_by_patch):\n reconstructed_images = np.zeros(output_shape)\n nb_elem_images = np.zeros(output_shape)\n for i in range(nb_matrix_by_row):\n for j in range(nb_matrix_by_row):\n reconstructed_images[i * step_length: i * step_length + img_patch_size,\n j * step_length: j * step_length + img_patch_size] += patches_list[\n number * nb_elem_by_patch + i * nb_matrix_by_row + j]\n nb_elem_images[i * step_length: i * step_length + img_patch_size,\n j * step_length: j * step_length + img_patch_size] += 1\n reconstructed_images = np.divide(reconstructed_images, nb_elem_images)\n images.extend([reconstructed_images])\n\n return images", "def patches_to_image(patches, H, W, overlap=False):\n image = np.zeros((H, W))\n patch_size = int(np.sqrt(np.shape(patches)[0]))\n overlap_step = 1 if overlap else patch_size\n count = 0\n dev_mask = np.zeros_like(image)\n for i in np.arange(H - patch_size + 1, step=overlap_step):\n for j in np.arange(W - patch_size + 1, step=overlap_step):\n image[i : i + patch_size, j : j + patch_size] += np.reshape(\n patches[:, count], (patch_size, patch_size)\n )\n dev_mask[i : i + patch_size, j : j + patch_size] += 1\n count += 1\n if overlap:\n image = image / dev_mask\n return image", "def get_patches(rimage, gimage, mimage, num_patches=48, patch_size=80, patch_stride=80):\n num_FSpatches = 16\n num_RApatches = 32\n rpatches = []\n gpatches = []\n mpatches = []\n #R_imgs = ((rimage+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'rainy.jpg', R_imgs[0,:,:,:])\n for i in range(int(math.sqrt(num_FSpatches))):\n for j in range(int(math.sqrt(num_FSpatches))):\n point_x = patch_stride*i\n point_y = patch_stride*j\n rpatch = rimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n #print(point_x)\n #print(point_y)\n #print(point_y+patch_size)\n #P_imgs = 
((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d_%d.jpg'%(i,j), P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n mpatches.append(mpatch)\n\n for k in range(num_RApatches):\n point1 = random.randint(0,240) # 116 comes from the image source size (320) - the patch dimension (80)\n point2 = random.randint(0,240)\n #rpatch = tf.image.crop_to_bounding_box(rimage, point1, point2, patch_size, patch_size)\n rpatch = rimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d.jpg'%i, P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n mpatches.append(mpatch)\n\n rpatches = np.array(rpatches)\n rpatches = np.squeeze(rpatches)\n #print(rpatches.shape)\n gpatches = np.array(gpatches)\n gpatches = np.squeeze(gpatches)\n mpatches = np.array(mpatches)\n mpatches = np.squeeze(mpatches)\n #assert rpatches.get_shape().dims == [num_patches, patch_size, patch_size, 3]\n assert rpatches.shape == (num_patches, patch_size, patch_size, 3)\n return rpatches, gpatches, mpatches", "def combine_patches_to_image(patches, target_height, target_width):\n\n counter = 0\n patch_size = patches.shape[1]\n coordinates = _get_top_left_coordinates(target_height, target_width, patch_size)\n\n if len(patches.shape) == 3: # channel dimension is missing\n patches = np.expand_dims(patches, -1)\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((target_height, target_width, patches.shape[-1] + 1))\n\n for top, left in coordinates:\n patch = combined[top:top + patch_size, left:left + patch_size, :-1]\n overlaps = combined[top:top + patch_size, left:left + patch_size, -1:]\n patch = (patch * overlaps + patches[counter]) / (overlaps + 1)\n combined[top:top + patch_size, left:left + patch_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return np.squeeze(combined[..., :-1])", "def combine_patches_to_image(y_pred, img, stride):\n\n counter = 0\n height, width = img.shape[:2]\n output_size = y_pred.shape[1]\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((height, width, y_pred.shape[-1] + 1))\n\n for i in range(0, height - output_size + 1, stride):\n for j in range(0, width - output_size + 1, stride):\n patch = combined[i:i + output_size, j:j + output_size, :-1]\n overlaps = combined[i:i + output_size, j:j + output_size, -1:]\n patch = (patch * overlaps + y_pred[counter]) / (overlaps + 1)\n combined[i:i + output_size, j:j + output_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return combined[:height, :width, :-1]", "def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + 
patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes", "def prepare_train_patches(images_path, labels_path, indices, patch_size, overlap, overlap_amount, aug_config):\n\n # Load images and labels\n images = extract_images(images_path, indices)\n labels = extract_images(labels_path, indices)\n\n # Get patches\n if overlap:\n image_patches = [patch for im in images for patch in patchify_overlap(im, patch_size, overlap_amount)]\n label_patches = [patch for label in labels for patch in patchify_overlap(label, patch_size, overlap_amount)]\n else:\n image_patches = [patch for im in images for patch in patchify(im, patch_size)]\n label_patches = [patch for label in labels for patch in patchify(label, patch_size)]\n \n if not aug_config:\n return image_patches, label_patches\n\n patches = zip(image_patches, label_patches)\n\n # Rotation needs to be applied on whole image\n if aug_config.do_rotation:\n images_rot = rotate_images(images, aug_config.rotation_angles)\n labels_rot = rotate_images(labels, aug_config.rotation_angles)\n\n for im, label in zip(images_rot, labels_rot):\n p = patchify_no_corner(im, label, patch_size, overlap, overlap_amount)\n image_patches.extend(p[0])\n label_patches.extend(p[1])\n\n # Flip each patch horizontally\n images_flipped = []\n labels_flipped = []\n if aug_config.do_flip:\n flip_hor = iaa.Fliplr(0.5).to_deterministic()\n flip_ver = iaa.Flipud(0.5).to_deterministic()\n images_flipped.extend(flip_hor.augment_images(image_patches))\n images_flipped.extend(flip_ver.augment_images(image_patches))\n labels_flipped.extend(flip_hor.augment_images(label_patches))\n labels_flipped.extend(flip_ver.augment_images(label_patches))\n\n image_patches.extend([im.copy() for im in images_flipped])\n label_patches.extend([im.copy() for im in labels_flipped])\n\n # For all the patches (even new ones), augment channels\n if aug_config.augment_channels:\n image_patches = augment_channels(image_patches, aug_config)\n\n return image_patches, label_patches", "def patches_to_image(patch_centers: np.ndarray,\n patch_lt: lt.LabeledTensor,\n name: Optional[str] = None) -> lt.LabeledTensor:\n with tf.compat.v1.name_scope(name, \"patches_to_image\", [patch_lt]) as scope:\n patch_lt = lt.transpose(patch_lt, [\"batch\", \"row\", \"column\", \"channel\"])\n\n num_extracted_rows = len(set([l[0] for l in patch_centers]))\n num_extracted_columns = len(set([l[1] for l in patch_centers]))\n assert num_extracted_rows * num_extracted_columns == patch_centers.shape[0]\n\n batch_size = len(patch_lt.axes[\"batch\"])\n assert batch_size % len(patch_centers) == 0\n output_batch_size = batch_size // len(patch_centers)\n\n # TODO(ericmc): This will fail if the stride is not homogeneous.\n if patch_centers.shape[0] == 1:\n stride = 0\n else:\n [row_0, column_0] = patch_centers[0]\n [row_1, column_1] = patch_centers[1]\n if row_0 == row_1:\n stride = column_1 - column_0\n else:\n stride = row_1 - row_0\n assert stride > 0\n assert abs(round(stride) - stride) < 0.0001\n stride = int(round(stride))\n\n patch_lt = lt.reshape(patch_lt, [\"batch\"],\n [(\"batch\", 
output_batch_size),\n (\"patch_row\", num_extracted_rows),\n (\"patch_column\", num_extracted_columns)])\n tf.compat.v1.logging.info(\"%r\", patch_lt.axes)\n\n row_lts = []\n for r in range(num_extracted_rows):\n this_row = []\n for c in range(num_extracted_columns):\n this_row.append(patch_lt[:, r, c, :, :, :])\n row_lts.append(lt.concat(this_row, \"column\"))\n stitched = lt.concat(row_lts, \"row\")\n\n return lt.identity(stitched, name=scope)", "def sample_patches(images, psize=(8, 8), n=10000, remove_mean=True):\n d = psize[0] * psize[1]\n patches = np.zeros((d, n))\n standardized = grayscale_and_standardize(images, remove_mean)\n\n shapes = []\n for pic in standardized:\n shapes.append(pic.shape)\n\n rand_pic_num = np.random.randint(0, len(standardized), n)\n rand_x = np.random.rand(n)\n rand_y = np.random.rand(n)\n\n for i in range(n):\n pic_id = rand_pic_num[i]\n pic_shape = shapes[pic_id]\n x = int(np.ceil(rand_x[i] * (pic_shape[0] - psize[1])))\n y = int(np.ceil(rand_y[i] * (pic_shape[1] - psize[0])))\n patches[:, i] = np.reshape(np.ascontiguousarray(\n standardized[pic_id][x:x + psize[0], y:y + psize[1]]), d)\n\n return patches", "def extract_patch_from_img(array, patch_index, patch_size, z_offset=0, mean=None, std=None):\n patch_index[0] -= z_offset\n patch_index[1] -= z_offset\n\n z, x, y = array.shape\n ww = [patch_size[0], patch_size[1], patch_size[2]]\n\n ret = np.zeros(ww)\n temp_patch_index = np.array(patch_index).copy()\n ww = [0, patch_size[0], 0, patch_size[1], 0, patch_size[2]]\n\n # if patch overlaps image boundry (needs 0 padding) offset image index\n if temp_patch_index[0] < 0:\n ww[0] -= temp_patch_index[0]\n temp_patch_index[0] = 0\n if temp_patch_index[2] < 0:\n ww[2] -= temp_patch_index[2]\n temp_patch_index[2] = 0\n if temp_patch_index[4] < 0:\n ww[4] -= temp_patch_index[4]\n temp_patch_index[4] = 0\n\n if temp_patch_index[1] > z:\n ww[1] -= temp_patch_index[1] - z\n temp_patch_index[1] = z\n if temp_patch_index[3] > x:\n ww[3] -= temp_patch_index[3] - x\n temp_patch_index[3] = x\n if temp_patch_index[5] > y:\n ww[5] -= temp_patch_index[5] - y\n temp_patch_index[5] = y\n if temp_patch_index[0] >= temp_patch_index[1]:\n temp_patch_index[0] = temp_patch_index[1] - 1\n\n insert = array[temp_patch_index[0]:temp_patch_index[1],\n temp_patch_index[2]:temp_patch_index[3],\n temp_patch_index[4]:temp_patch_index[5]]\n\n # normalize patch\n if not (mean is None or std is None):\n insert = np.divide(insert - mean, std)\n\n ret[ww[0]:ww[1], ww[2]:ww[3], ww[4]:ww[5]] = insert\n\n return ret", "def gather_patches(raw_data, grid_shape=None):\n num_examples = raw_data.shape[0]\n img_h = raw_data.shape[1]\n img_w = raw_data.shape[2]\n img_c = raw_data.shape[3]\n\n if grid_shape is None:\n grid_shape = get_grid_shape(num_examples)\n\n expected_examples = grid_shape[0] * grid_shape[1]\n padding_pattern = (((0, expected_examples\n - num_examples),)\n + ((0, 0),) * 3)\n padded_data = numpy.pad(\n raw_data,\n pad_width=padding_pattern,\n mode='constant',\n constant_values=0\n )\n\n image = padded_data.view().reshape((\n grid_shape[1], grid_shape[0] * img_h, img_w, img_c)\n ).transpose(\n (1, 0, 2, 3)\n ).reshape(\n (grid_shape[0] * img_h,\n grid_shape[1] * img_w,\n img_c)\n ).copy()\n\n image *= 0.5\n image += 0.5\n image *= 255\n\n return image", "def rebin_image(self):\r\n\r\n # bin the image down to smaller size by combining groups of bins\r\n\r\n print('Rebinning image')\r\n\r\n sh = self.imageData.shape\r\n\r\n if self.binsize > 1 or self.zbinsize > 1:\r\n\r\n nredx = 
int(sh[1]/self.binsize)\r\n\r\n nredy = int(sh[2]/self.binsize)\r\n\r\n nredz = int(self.imageData.shape[0]/self.zbinsize)\r\n print('nredx,nredy,nredz: ',[nredx,nredy,nredz])\r\n\r\n self.imageData = self.bin_ndarray(self.imageData, new_shape=(nredz, nredx, nredy), operation='mean')\r\n\r\n if nredz > 1:\r\n\r\n beforeFrames = self.nFrames\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.framerate = self.nFrames/(self.nrepetitions*self.period)\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n print(' Image Rebinned')\r\n\r\n self.print_image_info()", "def sample_patches(images, npatches, patch_sz):\n\tnimages, nrows, ncols = images.shape\n\timg_index = np.random.randint(0, nimages, npatches)\n\trow_index = np.random.randint(0, nrows-patch_sz, npatches)\n\tcol_index = np.random.randint(0, ncols-patch_sz, npatches)\n\tpatches = np.empty((npatches, patch_sz, patch_sz))\n\tfor i, (img, row, col) in enumerate(zip(img_index, row_index, col_index)):\n\t\tpatches[i] = images[img, row:row+patch_sz, col:col+patch_sz]\n\treturn patches", "def rescale_images(self, patches = [], n_pixel_elements = 42, flip = True, save = False):\t\t\t\n\t\t\n\t\trescaled_patches = []\n\t\tfor i in patches:\n\t\t\tpatch = plt.imread(os.path.join(self.project.base_dir,'patches','image'+str(i)+'.png'))\n\t\t\tif flip == True:\n\t\t\t\tpatch = np.flipud(patch)\n\t\t\tpatch = patch[:,patch.shape[1]/2 - patch.shape[0]/2:patch.shape[1]/2 + patch.shape[0]/2,0] # visual field 1080 by 1080\n\t\t\t\n\t\t\tscaled_patch = []\n\t\t\tscale = patch.shape[0]/n_pixel_elements\n\t\t\tfor x in range(n_pixel_elements):\n\t\t\t\tfor y in range (n_pixel_elements):\n\t\t\t\t\t# michelson_contrast\n\t\t\t\t\tscaled_patch.append(np.max(patch[scale*x:scale*x + scale,scale*y:scale*y + scale]) - np.min(patch[scale*x:scale*x + scale,scale*y:scale*y + scale]))\n\t\t\tscaled_patch = np.asarray(scaled_patch).reshape([n_pixel_elements,n_pixel_elements],order = 'C')\n\t\t\tif save == True:\n\t\t\t\timshow(scaled_patch)\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'heatmap_patch' + str(i)))\t\n\t\t\trescaled_patches.append(scaled_patch)\n\n\t\treturn rescaled_patches", "def make_flat_avg(images, out):\n image = Image(avg_images(images, out))\n image.normalise()\n return out", "def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches", "def create_patches_from_mask(image, mask, patchSize=32, pad=32, depth=1, searchSlices=None):\n rois = []\n images = []\n labels = []\n searchSlices = range(len(mask)) if searchSlices is None else searchSlices\n for i in searchSlices:\n # For each voxel, generate a ROI centered there\n if not np.any(mask[i]):\n continue\n xS, yS = np.nonzero(mask[i, :, :])\n xS -= xS % patchSize\n yS -= yS % patchSize\n allPatches = set(zip(xS, yS))\n for x, y in allPatches:\n patch = np.copy(\n # agafem el patch que ens interessa i agafem un contorn per si de cas (padding)\n # potser seria interessant reduir el padding (la quantitat de marge que deixem)\n # ara mateix tenim patches de 96, quan ens interessa el centre de 32 d'aquests\n image[i - depth: i + 1 + depth, x - pad:x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n label = np.copy(\n # quan fem rotacio al fer data 
augmentation, ens volem assegurar d'estar treballant amb\n # el mateix\n mask[i: i + 1, x - pad: x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n\n rois.append(np.array([x, y, i]))\n images.append(patch)\n labels.append(label)\n return rois, images, labels", "def iter_patch(\n arr: NdarrayOrTensor,\n patch_size: Sequence[int] | int = 0,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n copy_back: bool = True,\n mode: str | None = NumpyPadMode.WRAP,\n **pad_opts: dict,\n) -> Generator[tuple[NdarrayOrTensor, np.ndarray], None, None]:\n\n from monai.transforms.croppad.functional import pad_nd # needs to be here to avoid circular import\n\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # set padded flag to false if pad mode is None\n padded = bool(mode)\n is_v = [bool(p) for p in ensure_tuple_size(patch_size, arr.ndim)] # whether a valid patch size provided\n _pad_size = tuple(p if v and padded else 0 for p, v in zip(patch_size_, is_v)) # pad p if v else 0\n _overlap = [op if v else 0.0 for op, v in zip(ensure_tuple_rep(overlap, arr.ndim), is_v)] # overlap if v else 0.0\n # pad image by maximum values needed to ensure patches are taken from inside an image\n if padded:\n arrpad = pad_nd(arr, to_pad=[(p, p) for p in _pad_size], mode=mode, **pad_opts) # type: ignore\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, _pad_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, _pad_size))\n else:\n arrpad = arr\n start_pos_padded = start_pos\n iter_size = arr.shape\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded, _overlap, padded=padded):\n # compensate original image padding\n if padded:\n coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, _pad_size))\n else:\n coords_no_pad = tuple((coord.start, coord.stop) for coord in slices)\n yield arrpad[slices], np.asarray(coords_no_pad) # data and coords (in numpy; works with torch loader)\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(_pad_size, arr.shape))\n arr[...] 
= arrpad[slices] # type: ignore", "def generate_patches_from_img(img, patch_size=128):\n\n new_width, new_height, channels = img.shape\n\n if img.shape[0] % 128 != 0:\n new_width = img.shape[0] + (128 - img.shape[0] % 128)\n\n if img.shape[1] % 128 != 0:\n new_height = img.shape[1] + (128 - img.shape[1] % 128)\n\n resized_img = resize(img, (new_width, new_height))\n\n block_shape = (128, 128, 3)\n img_blocks = view_as_blocks(resized_img, block_shape=block_shape)\n\n img_patches = {}\n\n for r in range(img_blocks.shape[0]):\n for c in range(img_blocks.shape[1]):\n img = img_blocks[r, c]\n img = np.reshape(img, (128, 128, 3))\n img_patches[(r, c)] = img\n\n return img_patches", "def divide_image_to_patches(img, patch_size, stride=None):\n\n stride = stride or patch_size\n if not 0 < stride <= patch_size:\n raise ValueError(\n 'stride should be positive and smaller than or equal to patch_size')\n\n if len(img.shape) == 2: # this is a mask\n img = np.expand_dims(img, -1)\n\n height, width, n_channels = img.shape\n\n # Sometimes we need to extend the original image so that the sliding window\n # won't move out of the image\n ext_height, ext_width = _get_extended_image_size(\n height, width, patch_size, stride)\n ext_img = np.zeros((ext_height, ext_width, n_channels))\n ext_img[:height, :width] = img\n\n x = []\n\n for i in range(0, ext_height - patch_size + 1, stride):\n for j in range(0, ext_width - patch_size + 1, stride):\n x.append(ext_img[i:i + patch_size, j:j + patch_size, :])\n\n return np.array(x).astype('uint8')", "def patches_to_images(patches, stride, img_shape):\r\n h = img_shape[0]\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n assert len(patches) % n_stride ** 2 == 0, \"They must be the right number of patches per image\"\r\n\r\n n_images = len(patches) // (n_stride ** 2)\r\n\r\n images = []\r\n for i in range(n_images):\r\n n_patches = n_stride ** 2\r\n img = patches_to_img(patches[i * n_patches:(i + 1) * n_patches], stride, img_shape)\r\n images.append(img)\r\n\r\n return np.array(images)", "def _normalize_patches(patches):\n patches = array_ops.concat(patches, 0)\n mean, variance = nn.moments(patches, [1, 2, 3], keep_dims=True)\n patches = (patches - mean) / math_ops.sqrt(variance)\n return array_ops.reshape(patches, [array_ops.shape(patches)[0], -1])", "def extract_patches(image_list, mask_src, image_src, mask_dst, image_dst, patch_size):\n class_counts = defaultdict(lambda: 0)\n skipped = 0\n total = 0\n for im in tqdm(image_list):\n img = cv2.imread(os.path.join(image_src, im))\n msk = cv2.imread(os.path.join(mask_src, im), 0)\n \n assert (img.shape[0] == msk.shape[0]) \\\n and (img.shape[1] == msk.shape[1]), \"Mismatch!\"\n\n img_patches = patchify(img, (patch_size, patch_size, 3), step=patch_size)\n msk_patches = patchify(msk, (patch_size, patch_size), step=patch_size)\n img_patches = img_patches.reshape((-1, patch_size, patch_size, 3))\n msk_patches = msk_patches.reshape((-1, patch_size, patch_size))\n # Step = 256 for patch size means no overlap\n for i in range(img_patches.shape[0]):\n # Replace class labels\n mask_patch = replace_classes(msk_patches[i])\n unique, counts = np.unique(mask_patch, return_counts=True)\n # If outside of RoI takes > 90% and there is only 1 class, ignore the patch.\n outside = np.mean(mask_patch == 0) > 0.9\n if outside and (len(unique) < 2):\n skipped += 1\n continue\n for x, y in enumerate(unique):\n class_counts[y] += counts[x].item()\n img_patch = img_patches[i]\n filename = im.split(\".png\")[0] + 
\"_\" + str(i) + \".png\"\n cv2.imwrite(os.path.join(image_dst, filename), img_patch)\n cv2.imwrite(os.path.join(mask_dst, filename), mask_patch)\n total += 1\n print('Skipped: {} / {}'.format(skipped, total))\n return class_counts", "def reconstruct_image(patch_list, patch_nb=2):\n line_list = []\n for i in range(0, patch_nb ** 2 - 1, patch_nb):\n line_list.append(cv2.hconcat(patch_list[i : i + patch_nb]))\n final_img = cv2.vconcat(line_list)\n return final_img", "def extract_image_patches(images, ksizes, strides, rates, padding='same'):\n assert len(images.size()) == 4\n assert padding in ['same', 'valid']\n batch_size, channel, height, width = images.size()\n\n if padding == 'same':\n images = same_padding(images, ksizes, strides, rates)\n elif padding == 'valid':\n pass\n else:\n raise NotImplementedError('Unsupported padding type: {}.\\\n Only \"same\" or \"valid\" are supported.'.format(padding))\n\n unfold = torch.nn.Unfold(kernel_size=ksizes,\n dilation=rates,\n padding=0,\n stride=strides)\n patches = unfold(images)\n return patches # [N, C*k*k, L], L is the total number of such blocks", "def generate_patches(scaled_imgs, constants, all_patches):\n patch_size = constants.PATCH_SIZE\n step = 1 if all_patches else 2\n patches = []\n for k, sc in enumerate(scaled_imgs):\n img_patches = []\n for i in range(0, sc.shape[0] - patch_size, step):\n for j in range(0, sc.shape[1] - patch_size, step):\n raw_patch = sc[i:i + patch_size, j:j + patch_size, :]\n patch = Patch(\n raw_patch=raw_patch,\n patch_size=patch_size,\n )\n patch.store(sc, [i, j])\n img_patches.append(patch)\n patches.append(img_patches)\n return patches", "def smoothen(scaled_imgs, patches, constants):\n patch_size = constants.PATCH_SIZE\n\n for k in range(len(patches)):\n img = scaled_imgs[k]\n patch = patches[k]\n\n # We assume that alternate patches have been extracted in the initial step\n length_sd_array = int(round((img.shape[0] - patch_size) / 2))\n width_sd_array = int(round((img.shape[1] - patch_size) / 2))\n\n std_database = np.reshape(map(lambda x: x.std_dev, patch), [length_sd_array, width_sd_array])\n blur = np.reshape(cv2.GaussianBlur(std_database, (7, 7), sigmaX=6, sigmaY=6), [-1])\n map(lambda (i, x): setattr(x, 'std_dev', blur[i]), enumerate(patch))", "def assemble_softmax_3d(\n patches, spacing, orig_shape, n_classes, method=\"crop\", border=(16, 16, 16)\n):\n\n initial_array = np.zeros([orig_shape[0], orig_shape[1], orig_shape[2], n_classes])\n x_spacing = spacing[0]\n y_spacing = spacing[1]\n z_spacing = spacing[2]\n x_spacing[1:] += 1\n y_spacing[1:] += 1\n z_spacing[1:] += 1\n\n idx = 0\n\n if method == \"sum\":\n for x in x_spacing:\n for y in y_spacing:\n for z in z_spacing:\n patch = patches[idx]\n idx += 1\n\n if x == x_spacing[-1]:\n patch = patch[:-1, :, :, :]\n if y == y_spacing[-1]:\n patch = patch[:, :-1, :, :]\n if z == z_spacing[-1]:\n patch = patch[:, :, :-1, :]\n\n initial_array[\n x : x + patch.shape[0],\n y : y + patch.shape[1],\n z : z + patch.shape[2],\n ] += patch\n if method == \"crop\":\n assert border\n\n for x in x_spacing:\n for y in y_spacing:\n for z in z_spacing:\n patch = patches[idx]\n idx += 1\n patch = patch[\n border[0] : -border[0],\n border[1] : -border[1],\n border[2] : -border[2],\n ]\n\n if x == x_spacing[-1]:\n patch = patch[:-1, :, :, :]\n if y == y_spacing[-1]:\n patch = patch[:, :-1, :, :]\n if z == z_spacing[-1]:\n patch = patch[:, :, :-1, :]\n\n initial_array[\n x : x + patch.shape[0],\n y : y + patch.shape[1],\n z : z + patch.shape[2],\n ] += patch\n\n return 
initial_array", "def update_mean(img, clustermask):\n flat = img.flatten()\n flat.reshape((int(flat.shape[0] / 3), 3))\n w, h, _ = clustermask.shape\n cluster_assignees={}\n for cid,_ in enumerate(current_cluster_centers):\n cluster_assignees[cid] = []\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y][0]\n cluster_assignees[cid].append(img[x,y])\n for cid, pixels in cluster_assignees.items():\n current_cluster_centers[cid] = np.mean(np.array(pixels),axis=0)\n return clustermask", "def blending_example1():\n pic_desert = read_image(relpath(\"./externals/pic_desert.jpg\"), 2)\n pic_pool = read_image(relpath(\"./externals/pic_swim.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_desert.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n print(pic_desert.shape[2])\n [R1, G1, B1] = np.dsplit(pic_desert, pic_desert.shape[2])\n [R2, G2, B2] = np.dsplit(pic_pool, pic_pool.shape[2])\n R1 = np.reshape(R1, (512,1024))\n R2 = np.reshape(R2, (512,1024))\n G1 = np.reshape(G1, (512,1024))\n G2 = np.reshape(G2, (512,1024))\n B1 = np.reshape(B1, (512,1024))\n B2 = np.reshape(B2, (512,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_desert)\n ax2.imshow(pic_pool)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_desert, pic_pool, mask, new_pic", "def divide_image_to_patches(img, patch_size):\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, n_channels = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')", "def _sample_patches(imgs, \n labelimgs, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=0):\n samplelist = []\n \n # number of bands should be constant, therefore the dimensionality can be read from any \n # sub img\n bands = imgs[0].shape[-1]\n\n # calculate remapping for labels when removing `ignore_labels`\n # flatten labelimgs and convert to numpy array to use np.unique function on it\n flattened_labelimgs = np.concatenate([labelimg.reshape(-1) for labelimg in labelimgs])\n max_label = np.unique(flattened_labelimgs).max()\n remaining_labels = np.setdiff1d(np.arange(max_label+1), ignore_labels)\n label_remap = np.full((max_label+1), -1)\n for i, val in enumerate(remaining_labels):\n label_remap[val] = i\n\n valid_sample_count = 0\n for labelimg in labelimgs:\n valid_sample_count += np.invert(np.isin(labelimg, ignore_labels)).sum()\n print(f'Extracting {valid_sample_count} valid samples...')\n \n if ('data' in patchgroup) and ('labels' in patchgroup):\n # resize existing dataset to append patches from test set\n patchgroup['data'].resize((patchgroup['data'].shape[0] + valid_sample_count), axis=0)\n patchgroup['labels'].resize((patchgroup['labels'].shape[0] + 
valid_sample_count), axis=0)\n else:\n patchgroup.create_dataset('data', (valid_sample_count, patch_size, patch_size, bands)\n , chunks=(1, patch_size, patch_size, bands)\n , maxshape=(None, patch_size, patch_size, bands)\n , dtype=imgs[0].dtype) # datatype should be the same for all imgs\n patchgroup.create_dataset('labels', (valid_sample_count,1)\n , chunks=True, maxshape=(None, 1)\n , dtype=labelimgs[0].dtype) # datatype should be the same for all labelimgs\n \n idx = startidx\n with tqdm(total=valid_sample_count) as pbar:\n for img, labelimg in zip(imgs, labelimgs):\n\n # pad along spatial axes\n margin = int((patch_size - 1) / 2)\n X = np.pad(img, ((margin, margin), (margin, margin), (0,0)), \n mode=padding_mode, constant_values=padding_values) \n\n # split patches\n for r in range(margin, X.shape[0] - margin):\n for c in range(margin, X.shape[1] - margin):\n patchlabel = labelimg[r-margin, c-margin]\n\n # do not create a sample for 'ignore_labels'\n if patchlabel in ignore_labels:\n continue\n else :\n # correct label\n patchlabel = label_remap[patchlabel]\n\n patch = X[r - margin:r + margin + 1, c - margin:c + margin + 1]\n # store sample in hdf file\n patchgroup['data'][idx] = patch\n patchgroup['labels'][idx] = patchlabel\n\n # update\n idx += 1\n pbar.update(1)\n\n patchgroup.attrs['patch_size'] = patch_size\n patchgroup.attrs['padding_mode'] = padding_mode\n patchgroup.attrs['padding_values'] = padding_values\n patchgroup.attrs['ignore_labels'] = ignore_labels\n\n return valid_sample_count", "def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)", "def extract_patches_single_scale(\n patch_size: int,\n stride: int,\n image_lt: lt.LabeledTensor,\n name: str = None,\n) -> Tuple[np.ndarray, lt.LabeledTensor]:\n with tf.compat.v1.name_scope(name, \"extract_patches_single_scale\", [image_lt]) as scope:\n image_lt = lt.transpose(image_lt, [\"batch\", \"row\", \"column\", \"channel\"])\n image_lt = tensorcheck.bounds(0.0, 1.0, image_lt)\n\n logging.info(\"extract_patches_single_scale: Input axes: %s\", image_lt.axes)\n\n batch_size = len(image_lt.axes[\"batch\"])\n num_rows = len(image_lt.axes[\"row\"])\n num_columns = len(image_lt.axes[\"column\"])\n\n row_offsets = range(0, num_rows - patch_size + 1, stride)\n if not row_offsets:\n raise ValueError(\"num_rows - patch_size + 1 must be >= 1\")\n expected_num_rows = _num_extracted_rows_and_columns(num_rows, patch_size,\n stride, 1, 2)\n assert len(row_offsets) == expected_num_rows, (len(row_offsets),\n expected_num_rows,\n (num_rows, patch_size,\n stride))\n\n column_offsets = range(0, num_columns - patch_size + 1, stride)\n assert column_offsets\n expected_num_columns = _num_extracted_rows_and_columns(\n num_columns, patch_size, stride, 1, 2)\n assert len(column_offsets) == expected_num_columns, (len(column_offsets),\n expected_num_columns,\n (num_rows, patch_size,\n stride))\n\n offsets = [(r, c) for r in row_offsets for c in column_offsets]\n\n patch_lts = []\n for b 
in range(batch_size):\n for (row, column) in offsets:\n patch_lt = lt.slice(\n image_lt, {\n \"batch\": slice(b, b + 1),\n \"row\": slice(row, row + patch_size),\n \"column\": slice(column, column + patch_size)\n })\n patch_lts.append(patch_lt)\n\n pack_lt = lt.concat(patch_lts, \"batch\")\n reshape_lt = lt.reshape(pack_lt, [\"batch\"], [\n image_lt.axes[\"batch\"], (\"patch_row\", len(row_offsets)),\n (\"patch_column\", len(column_offsets))\n ])\n\n reshape_lt = tensorcheck.shape(reshape_lt)\n reshape_lt = tensorcheck.bounds(0.0, 1.0, reshape_lt, name=scope)\n\n centers = [\n (r + patch_size / 2.0, c + patch_size / 2.0) for (r, c) in offsets\n ]\n\n logging.info(\"extract_patches_single_scale: Output axes: %s\",\n reshape_lt.axes)\n\n return np.array(centers), reshape_lt", "def extract_patches(data,patch_dim):\n \n m = data.shape[0]\n im_x = data.shape[1]\n im_y = data.shape[2]\n \n assert im_x%float(patch_dim)==0 and im_y%float(patch_dim)==0, \\\n \"patch_size must divide x and y dimensions of image\"\n\n numpatchs = m*(im_x/patch_dim)*(im_y/patch_dim)\n patch_size = patch_dim**2\n\n patches = np.empty((patch_size,numpatchs))\n p=0\n for i in range(data.shape[0]):\n image = data[i,...]\n for x in np.r_[0:im_x:patch_dim]:\n for y in np.r_[0:im_y:patch_dim]:\n patch = image[x:x+patch_dim,y:y+patch_dim]\n patches[:,p] = patch.ravel()\n p+=1\n \n return patches", "def extract_patches(data, idxs=None, patch_size=3, min_val=0, ctx_radius=(3,5,7), economy_patch=True, mean=False):\n # catch errors and setup the initialize values of required variables\n if idxs is None:\n idxs = np.where(data > min_val)\n if patch_size % 2 != 1 and patch_size > 0:\n raise SynthError('Patch size must be odd or zero')\n if len(idxs) != 3:\n raise SynthError('Data must be 3-dimensional.')\n ctx_radius = ctx_radius if ctx_radius[0] > 0 else []\n\n # initialize patch data structure based on user input\n if economy_patch and patch_size > 1:\n patch_len = 7 + len(ctx_radius) * 6\n elif patch_size == 0:\n patch_len = len(ctx_radius) * 6\n elif not economy_patch or patch_size == 1:\n patch_len = patch_size**3 + (len(ctx_radius) * 6)\n else:\n raise SynthError('patch_size and ctx_radius must both be non-negative and at least '\n 'one must be positive ({}, {} invalid)'.format(patch_size, ctx_radius))\n patches = np.zeros((len(idxs[0]), patch_len))\n\n # extract the patches in an optimal fashion based on user input\n if patch_size == 1 and not ctx_radius:\n patches[:, 0] = data[idxs]\n elif patch_size == 0:\n for n, (i, j, k) in enumerate(zip(*idxs)):\n patches[n, :] = np.concatenate([patch_context(data, i, j, k, r) for r in ctx_radius])\n else:\n h = int(np.floor(patch_size / 2))\n for n, (i, j, k) in enumerate(zip(*idxs)):\n patch = get_patch(data, i, j, k, h, economy_patch).flatten()\n ctx = [patch_context(data, i, j, k, r) for r in ctx_radius]\n patches[n, :] = np.concatenate((patch, *ctx))\n if mean:\n patches = np.mean(patches, axis=1)[:, np.newaxis]\n return patches", "def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n p = tf.reshape(p, [n_crops, -1, patch_size * patch_size * c])\n\n count_h = _ceil_divide_int(h, patch_stride)\n count_w = _ceil_divide_int(w, patch_stride)\n\n # Shape (num_patches, 1)\n spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)\n # 
Shape (1, num_patches, 1)\n spatial_p = tf.expand_dims(spatial_p, axis=0)\n # Shape (n_crops, num_patches, 1)\n spatial_p = tf.tile(spatial_p, (n_crops, 1, 1))\n spatial_p = tf.cast(spatial_p, dtype=p.dtype)\n # Shape (n_crops, num_patches, 1)\n scale_p = tf.ones_like(spatial_p, dtype=p.dtype) * scale_id\n # Shape (n_crops, num_patches, 1)\n mask_p = tf.ones_like(spatial_p, dtype=p.dtype)\n\n # Concatenating is a hacky way to pass both patches, positions and input\n # mask to the model.\n # Shape (n_crops, num_patches, patch_size * patch_size * c + 3)\n out = tf.concat([p, spatial_p, scale_p, mask_p], axis=2)\n if max_seq_len >= 0:\n out = _pad_or_cut_to_max_seq_len(out, max_seq_len)\n out = tf.reshape(out,\n [n_crops, max_seq_len, c * patch_size * patch_size + 3])\n else:\n out = tf.reshape(out, [n_crops, -1, c * patch_size * patch_size + 3])\n return out", "def generatePatch(self):\n\n image_processor = ImageProcessor()\n\n # Load the network______________________________________________________________________________________________\n # - g_input: Input to the generator\n # - g_output_patch_only: Patch generated\n # - surrounding_region: Region surrounding the masked image to be merged with the generated patch\n # - training: Whether the model is training or not. When invoking the model, False should be passed in\n\n network = Network()\n d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \\\n patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network(batch_size)\n\n\n # Create a new TensorFlow session\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n\n # Get the paths of all the files within the test dataset location and shuffle the images\n file_paths = np.array(glob.glob(self.test_dataset_location))\n number_of_instances = len(file_paths)\n indexes = np.random.permutation(number_of_instances)\n file_paths = file_paths[indexes]\n\n\n # Load learnt model\n mi.load_checkpoint(sess)\n\n\n # Iterate through each batch of images\n for i in range(number_of_instances // batch_size):\n\n # Retrieve batch of training images\n batch_file_paths = file_paths[i * batch_size: i * batch_size + batch_size]\n _, g_batch, image_full, surrounding_region_batch, _ = image_processor.create_batch(batch_file_paths)\n\n # Generate patches for the batch of images\n generated_patches = sess.run(g_output_patch_only, feed_dict={g_input: g_batch,\n surrounding_region: surrounding_region_batch, training: False})\n\n # Save the completed images. 
Both the ground truth (1) and images with the generated patch using unsharp\n # intensities of the default 2.5 and 0.4 are saved\n for k in range(0, batch_size):\n img_id = batch_size * i + k\n\n image_processor.save_image(image_full[k], img_id, 1)\n\n generated_patch = generated_patches[k]\n\n sharpened_patch = image_processor.unsharp_mask(generated_patch)\n sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],\n patch_startX, patch_startY)\n image_processor.save_image(sharpened_image, img_id, 2)\n\n sharpened_patch = image_processor.unsharp_mask(generated_patch, 0.5)\n sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],\n patch_startX, patch_startY)\n image_processor.save_image(sharpened_image, img_id, 3)\n\n print(i * batch_size)", "def extract_patch(n, patch_size, imgs):\n # Extract patches from input images\n img_patches = [img_crop(imgs[i], patch_size, patch_size) for i in range(n)]\n #gt_patches = [img_crop(gt_imgs[i], patch_size, patch_size) for i in range(n)]\n\n # Linearize list of patches\n img_patches = np.asarray([img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))])\n #gt_patches = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n \n return img_patches #,gt_patches", "def _extract_patches(img, patch_s):\n def np_extract_patches(img):\n orig = np.array(img.shape[:2])\n new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)\n points = new - orig\n img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],\n mode='constant')\n patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)\n patches = patches.reshape(-1, *patch_s)\n return patches\n\n patches = tf.numpy_function(np_extract_patches, [img], tf.float32)\n return patches", "def patchGenerator(gen, patch_size=128, patch_batch_size=1):\n \n for imgs, masks in gen: # For each batch\n img_list = []\n mask_list = []\n for i in range(0, imgs.shape[0]): # For each image in a batch\n patch_x = patchify(imgs[i], (patch_size, patch_size, imgs[i].shape[-1]), step=patch_size) # split image into 4*4 small 128*128 patches.\n img_p = patch_x.reshape(-1, *patch_x.shape[-3:])\n img_list.append(img_p)\n\n mask_y = patchify(masks[i], (patch_size, patch_size, 1), step=patch_size) # split mask into 4*4 small 128*128 patches.\n mask_p = mask_y.reshape(-1, *mask_y.shape[-3:])\n mask_list.append(mask_p)\n \n if (patch_batch_size == 1):\n for j in range(0, img_p.shape[0]): # For each patch in a image\n yield img_p[j][np.newaxis, :], mask_p[j][np.newaxis, :]\n \n if (patch_batch_size > 1):\n image_patches = np.concatenate(img_list)\n mask_patches = np.concatenate(mask_list)\n patch_batch_counter = 0\n for idx in range(0, patch_batch_size):\n image_patch_batch = image_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n mask_patch_batch = mask_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n shuffled_images, shuffled_masks = randomize(image_patch_batch, mask_patch_batch)\n yield shuffled_images, shuffled_masks", "def update_mean(img: np.ndarray, clustermask: np.ndarray):\n\n for k in range(numclusters):\n current_cluster_centers[k, 0, :] = np.mean(img[clustermask==k], axis=0)", "def image_patch(self, image):\n height, width = image.shape\n self.current_image = image\n H_out, W_out = output_shape(height, width, self.filter_size, self.padding, self.stride)\n for j in range(H_out):\n for k in range(W_out):\n image_patch = image[j*self.stride : 
(j*self.stride + self.filter_size), k*self.stride:(k*self.stride+self.filter_size)]\n yield image_patch, j, k", "def get_patches_non_overlap(array, patch_height, patch_width): \n total_patches_in_height = array.shape[0]//patch_height\n total_patches_in_width = array.shape[1]//patch_width\n # print(\"total patches in height from supplied image array : {}\".format(total_patches_in_height))\n # print(\"total patches in width from supplied image array : {}\".format(total_patches_in_width))\n \n total_patches = total_patches_in_height * total_patches_in_width\n # print(\"total patches from supplied image array : {}\".format(total_patches))\n patches = np.empty(shape=(total_patches, 1, patch_height, patch_width), dtype=np.uint8)\n \n patch_no = 0\n for i in range(0, array.shape[0], patch_height):\n for j in range(0, array.shape[1], patch_width):\n if (i+patch_height <= array.shape[0]+1) and (j+patch_width <= array.shape[1]+1):\n patches[patch_no, 0, :, :] = array[i:i+patch_height, j:j+patch_width]\n patch_no += 1\n return patches", "def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ):\n px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0]))\n l, t = np.meshgrid(\n np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]),\n np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) )\n l = l.ravel()\n t = t.ravel()\n x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1]))\n y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1]))\n return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l)", "def _update_avg(self):\n if self._data_type == 'coords':\n # default averaging is supported only for 'matrix' dataTypes\n return\n elif self._data_type == 'image':\n\n x, y = self._averaging, self._averaging\n\n if (x,y) == (1, 1):\n self.vectors = self._original_data\n # calling original data\n return\n\n tempdat = self._original_data\n range_x = tempdat.shape[0]\n range_y = tempdat.shape[1]\n x_offset = int((x - 1) / 2)\n y_offset = int((y - 1) / 2)\n\n kernel = np.ones(shape=(x, y)) / (x*y)\n\n output_mat = np.zeros_like(tempdat)\n output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel,\n mode='same', boundary='wrap')\n output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel,\n mode='same', boundary='wrap')\n\n output_mat[:, :, 0] = output_mat_x\n output_mat[:, :, 1] = output_mat_y\n\n self.vectors = (output_mat[x_offset:range_x-x_offset:x,\n y_offset:range_y-y_offset:y])", "def image_to_patches(image, patch_size=8, overlap=False, is_mask=False):\n H, W = np.shape(image)\n num_patches = (\n (H - patch_size + 1) * (W - patch_size + 1)\n if overlap\n else int(H / patch_size) * int(W / patch_size)\n )\n patches = (\n np.zeros((patch_size ** 2, patch_size ** 2, num_patches))\n if is_mask\n else np.zeros((patch_size ** 2, num_patches))\n )\n overlap_step = 1 if overlap else patch_size\n count = 0\n for i in np.arange(H - patch_size + 1, step=overlap_step):\n for j in np.arange(W - patch_size + 1, step=overlap_step):\n if is_mask:\n patches[:, :, count] = np.diag(\n np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n )\n else:\n patches[:, count] = np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n count += 1\n return patches", "def _calc_avg_img(self, data: Union[Sequence[np.ndarray],\n Sequence[Sequence[np.ndarray]]]\n ) -> np.ndarray:\n summed = None\n cnt = 0\n for seq in data:\n if isinstance(seq, np.ndarray) and seq.ndim == 2:\n # seq is a single image, turn it into 
a sequence\n seq = [seq]\n\n for img in seq:\n # Sequence of image sequences\n norm_img = self._normalize_image(img)\n if summed is None:\n summed = norm_img\n else:\n summed += norm_img\n cnt += 1\n\n ret = summed / cnt\n return ret", "def train_patches(lmks, imgs, ref, psize, ssize, var=1.0, lmbda=1e-6, mu_init=1e-3, nsamples=1000):\n\n if isinstance(psize, int):\n psize = (psize, psize)\n if isinstance(ssize, int):\n ssize = (ssize, ssize)\n\n n = len(ref) // 2\n ximg = psize[1] + ssize[1]\n yimg = psize[0] + ssize[0]\n wsize = (yimg, ximg)\n\n patches = []\n\n # train each patch model\n for i in range(n):\n print('patch', i+1, 'of', n, '...')\n images = []\n for j in range(lmks.shape[1]):\n im = imgs[j]\n pt = lmks[:,j]\n S = calc_simil(pt, ref)\n A = np.empty((2,3))\n A[:2,:2] = S[:2,:2]\n A[0,2] = pt[2*i] - (A[0,0] * (ximg-1)/2 + A[0,1] * (yimg-1)/2)\n A[1,2] = pt[2*i+1] - (A[1,0] * (ximg-1)/2 + A[1,1] * (yimg-1)/2)\n I = cv2.warpAffine(im, A, wsize, flags=cv2.INTER_LINEAR+cv2.WARP_INVERSE_MAP)\n images.append(I)\n\n patch = train_patch(images, psize, var, lmbda, mu_init, nsamples)\n patches.append(patch)\n\n return np.array(patches)", "def img_to_patches(img, win, stride=1):\n k = 0\n endc = img.shape[0]\n endw = img.shape[1]\n endh = img.shape[2]\n if endw<win or endh<win:\n return np.zeros([endc,win,win,0])\n patch = img[:, 0:endw-win+0+1:stride, 0:endh-win+0+1:stride]\n total_pat_num = patch.shape[1] * patch.shape[2]\n res = np.zeros([endc, win*win, total_pat_num], np.float32)\n for i in range(win):\n for j in range(win):\n patch = img[:, i:endw-win+i+1:stride, j:endh-win+j+1:stride]\n res[:, k, :] = np.array(patch[:]).reshape(endc, total_pat_num)\n k = k + 1\n return res.reshape([endc, win, win, total_pat_num])", "def load_pixel_sparse(n_imgs=5, n_patches=100000, patch_x=4, patch_y=4):\n #n = np.random.randn(n_patches, patch_x*patch_y)\n #patches_unnorm = n**3\n #patches = patches_unnorm / np.std(patches_unnorm)\n patches = np.random.laplace(size=(n_patches, patch_x*patch_y))\n #patches = np.random.standard_cauchy(size=(n_patches, patch_x*patch_y))\n W_X = np.eye(patch_x*patch_y)\n # DEBUG why is this different from what's expected of load_van_hateren\n #return patches, W_X\n return patches", "def get_multiscale_patches(\n image,\n patch_size,\n patch_stride,\n hse_grid_size,\n longer_side_lengths,\n max_seq_len_from_original_res = None):\n # Sorting the list to ensure a deterministic encoding of the scale position.\n longer_side_lengths = sorted(longer_side_lengths)\n\n # Input channels.\n c = 3\n if len(image.get_shape().as_list()) == 3:\n n_crops = 1\n h, w = tf.shape(image)[0], tf.shape(image)[1]\n image = tf.expand_dims(image, axis=0)\n else:\n n_crops, h, w = (tf.shape(image)[0], tf.shape(image)[1], tf.shape(image)[2])\n\n outputs = []\n for scale_id, longer_size in enumerate(longer_side_lengths):\n resized_image, rh, rw = resize_preserve_aspect_ratio(\n image, h, w, longer_size)\n\n max_seq_len = int(np.ceil(longer_size / patch_stride)**2)\n out = _extract_patches_and_positions_from_image(resized_image, patch_size,\n patch_stride, hse_grid_size,\n n_crops, rh, rw, c,\n scale_id, max_seq_len)\n outputs.append(out)\n\n if max_seq_len_from_original_res is not None:\n out = _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size, n_crops, h, w, c,\n len(longer_side_lengths), max_seq_len_from_original_res)\n outputs.append(out)\n\n # Shape: (n_crops, num_total_patches, patch_size * patch_size * c + 3)\n outputs = tf.concat(outputs, axis=1)\n 
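# Illustrative shape check (an assumed configuration, not part of the original code):
# with patch_size=16, patch_stride=16, longer_side_lengths=[224, 448] and no
# original-resolution patches, the two scales contribute ceil(224/16)**2 = 196 and
# ceil(448/16)**2 = 784 rows, so after the concat above `outputs` has shape
# (n_crops, 980, 16*16*3 + 3) = (n_crops, 980, 771).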
if n_crops == 1:\n # Shape: (num_total_patches, patch_size * patch_size * c + 3).\n # Training mode. 4 dim wasn't handled by loss.\n outputs = outputs[0]\n return outputs", "def img_to_patches(img, patch_size, stride, overlapping=True):\r\n h, w, _ = img.shape\r\n\r\n assert h == w, 'height should be equal to width ({} != {})'.format(h, w)\r\n assert overlapping or patch_size % stride == 0, 'cannot have non overlapping patches with {} % {} != 0' \\\r\n .format(patch_size, stride)\r\n assert (h - patch_size) % stride == 0, 'height - patch_size should be dividable by stride but {} % {} != 0' \\\r\n .format(h - patch_size, stride)\r\n\r\n n_stride = (h - patch_size) // stride + 1\r\n patches = []\r\n for i in range(n_stride):\r\n if overlapping or i * stride % patch_size == 0:\r\n for j in range(n_stride):\r\n if overlapping or j * stride % patch_size == 0:\r\n patch = img[i * stride: i * stride + patch_size, j * stride: j * stride + patch_size]\r\n patches.append(patch)\r\n return np.array(patches)", "def pool(images, kernel_shape, stride, mode='max'):\n m, h, w, c = images.shape\n kh, kw = kernel_shape\n sh, sw = stride\n\n output_h = int(np.floor((h - kh) / sh) + 1)\n output_w = int(np.floor((w - kw) / sw) + 1)\n\n output = np.zeros((m, output_h, output_w, c))\n\n for x in range(output_h):\n for y in range(output_w):\n if mode == \"max\":\n output[:, x, y, :] = np.max(\n images[:, x * sh:kh + x * sh, y * sw:kw + y * sw, :],\n axis=(1, 2))\n if mode == \"avg\":\n output[:, x, y, :] = np.average(\n images[:, x*sh:kh + x*sh, y*sw:kw + y*sw, :],\n axis=(1, 2))\n return output", "def pool(images, kernel_shape, stride, mode='max'):\n m = images.shape[0]\n h = images.shape[1]\n w = images.shape[2]\n c = images.shape[3]\n kh = kernel_shape[0]\n kw = kernel_shape[1]\n sh = stride[0]\n sw = stride[1]\n\n output_h = int(1 + ((h - kh) / sh))\n output_w = int(1 + ((w - kw) / sw))\n\n out = np.zeros((m, output_h, output_w, c))\n\n image = np.arange(m)\n\n for i in range(output_h):\n for j in range(output_w):\n if mode == 'max':\n out[image, i, j] = (np.max(images[image,\n i * sh:((i * sh) + kh),\n j * sw:((j * sw) + kw)],\n axis=(1, 2)))\n elif mode == 'avg':\n out[image, i, j] = (np.mean(images[image,\n i * sh:((i * sh) + kh),\n j * sw:((j * sw) + kw)],\n axis=(1, 2)))\n return out", "def kmeans_005():\n n_patches_vals = [500000, 600000, 700000]\n include_test_images = [False, True]\n\n scores = []\n for n_patches in n_patches_vals:\n for incl in include_test_images:\n s = 15\n crop = 150\n n_centroids = 1600\n rf_size = 5\n logger.info(\"Training with n_patches {}, with test images {}\".format(n_patches, incl))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_005_patches_{}_test{}'.format(n_patches, incl),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n if incl:\n test_images = test_x_crop_scale.transform()\n images = np.vstack([images, test_images])\n logger.info(\"Extracting patches from images ndarray 
shape: {}\".format(images.shape))\n\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n # Reload the original images\n images = train_x_crop_scale.transform()\n logger.info(\"Generating features on images ndarray shape: {}\".format(images.shape))\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_005_patches_{}_test_{}.npy'.format(n_patches, incl), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_patches, incl, wrapper.cv_scores)\n logger.info(\"Score: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()", "def _batch_to_patches(batch, patches_per_image, patch_size):\n\n def py_func_random_patches(batch):\n \"\"\"Numpy wrapper.\"\"\"\n batch_size, height, width, channels = batch.shape\n patch_count = patches_per_image * batch_size\n hs = patch_size // 2\n # Randomly pick patches.\n patch_id, y, x, chan = np.ogrid[0:patch_count, -hs:hs + 1, -hs:hs + 1, 0:3]\n img_id = patch_id // patches_per_image\n # pylint: disable=g-no-augmented-assignment\n # Need explicit addition for broadcast to work properly.\n y = y + np.random.randint(hs, height - hs, size=(patch_count, 1, 1, 1))\n x = x + np.random.randint(hs, width - hs, size=(patch_count, 1, 1, 1))\n # pylint: enable=g-no-augmented-assignment\n idx = ((img_id * height + y) * width + x) * channels + chan\n patches = batch.flat[idx]\n return patches\n\n patches = script_ops.py_func(\n py_func_random_patches, [batch], batch.dtype, stateful=False)\n return patches", "def batch_image_mask(patch_R, patch_C):\n\n conf = configparser.ConfigParser()\n conf.read(os.path.join(current_path, \"..\", \"sys.ini\"))\n image_dir = conf.get(\"UTILS_MASK\", \"IMAGE_DIR\")\n images = glob.glob(os.path.join(image_dir, \"*.png\"))\n images = sorted(images)\n\n info_logger = get_logger(level=\"info\")\n error_logger = get_logger(level=\"error\")\n\n DEVICE = \"/gpu:1\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n with tf.device(DEVICE):\n seg_model = load_maskrcnn_model()\n for image in images:\n try:\n image_mask(image, patch_R, patch_C, seg_model)\n info_logger.info(f\"Create mask {image} success\")\n except Exception as e:\n error_logger.error(f\"Create mask {image} error\", exc_info=True)", "def image_mask(image, patch_R, patch_C, seg_model):\n\n im = Image.open(image)\n im_name = os.path.basename(image).split('.')[0]\n im_width, im_height = im.width, im.height\n\n N = patch_R // patch_C\n\n W_ps_NI = im_width // patch_C # 31782 // 256 = 124\n # W_ps_NR = slide_width % patch_C # 31782 % 256 = 38\n H_ps_NI = im_height // patch_R # 24529 // 1024 = 23\n # H_ps_NR = slide_height % patch_R # 24529 % 1024 = 977\n\n cell_ratio = 0.85 # the threshold that decide the patch is background or not\n\n output_dir = os.path.join(current_path, \"..\", \"output\", \"output_mask\")\n if not os.path.isdir(output_dir): os.makedirs(output_dir)\n\n np_im = np.array(im)[:, :, 0:3] # exclude alpha\n for w in range(W_ps_NI):\n for h in range(H_ps_NI):\n subHIC = np_im[h * patch_R: (h+1) * patch_R, w * patch_C:(w+1) * patch_C, :]\n\n # rgb three channels value that >200 and <40 are ignored segment\n rgb_s = (abs(subHIC[:, :, 0] - 120) >= 80) & 
(abs(subHIC[:, :, 1] - 120) >= 80) & (\n abs(subHIC[:, :, 2] - 120) >= 80) # >200 <40\n\n if np.sum(rgb_s) <= (patch_R * patch_C) * cell_ratio:\n # segment\n subHIC = np.where(rgb_similarity(subHIC, 15, 195), 250, subHIC)\n # adjust equalization histogram and adjust brightness\n for k in range(subHIC.shape[2]):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(N * 4, 4))\n subHIC[:, :, k] = clahe.apply(subHIC[:, :, k])\n subHIC = exposure.adjust_gamma(subHIC, gamma=1.5)\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n allmask_prob_list = maskrcnn_detection(seg_model, subHIC)\n\n for i in range(len(allmask_prob_list)):\n for layer in range(allmask_prob_list[i].shape[2]):\n image, cnts, hierarchy = cv2.findContours(allmask_prob_list[i][:, :, layer],\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n np_im[h * patch_R + i * patch_C: h * patch_R + (i + 1) * patch_C, w * patch_C:(w + 1) * patch_C,\n :] = cv2.drawContours(np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :],\n cnts, -1, (0, 255, 0), 1)\n\n # np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :] = subHIC[i]\n\n # plt.savefig(os.path.join(output_dir, f\"{im_name}w{w}h{h}N{i}.png\"))\n\n io.imsave(os.path.join(output_dir, f\"{im_name}.png\"), np_im)", "def _mask_and_avg(values, padding_mask):\n\n dec_lens = tf.reduce_sum(padding_mask, axis=1) # shape batch_size. float32\n values_per_step = [v * padding_mask[:,dec_step] for dec_step,v in enumerate(values)]\n values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member\n return tf.reduce_mean(values_per_ex) # overall average", "def make_average(self, arr):\n\n if not self.degen:\n self.get_degen()\n\n nkpt, nband = arr.shape[-2:]\n \n for ikpt in range(nkpt):\n for group in self.degen[ikpt]:\n average = copy(arr[...,ikpt,group[0][1]])\n for ispin, iband in group[1:]:\n average += arr[...,ikpt,iband]\n \n average /= len(group)\n for ispin, iband in group:\n arr[...,ikpt,iband] = average\n \n return arr", "def extract_patches(image, patchshape, overlap_allowed=0.1, cropvalue=None, crop_fraction_allowed=0.1):\r\n jump_cols = int(patchshape[1] * overlap_allowed)\r\n jump_rows = int(patchshape[0] * overlap_allowed)\r\n\r\n # Restrict ourselves to the rectangle containing non-cropped pixels\r\n if cropvalue is not None:\r\n rows, cols = np.where(image != cropvalue)\r\n rows.sort()\r\n cols.sort()\r\n active = image[rows[0]:rows[-1], cols[0]:cols[-1]]\r\n else:\r\n active = image\r\n\r\n rowstart = 0\r\n colstart = 0\r\n\r\n # Array tracking where we've already taken patches.\r\n covered = np.zeros(active.shape, dtype=bool)\r\n patches = []\r\n regions = []\r\n while rowstart <= active.shape[0] - patchshape[0]:\r\n # Record whether or not e've found a patch in this row,\r\n # so we know whether to skip ahead.\r\n got_a_patch_this_row = False\r\n colstart = 0\r\n while colstart <= active.shape[1] - patchshape[1]:\r\n # Slice tuple indexing the region of our proposed patch\r\n region = (slice(rowstart, rowstart + patchshape[0]),\r\n slice(colstart, colstart + patchshape[1]))\r\n\r\n # The actual pixels in that region.\r\n patch = active[region]\r\n\r\n # The current mask value for that region.\r\n cover_p = covered[region]\r\n if cropvalue is None or \\\r\n frac_eq_to(patch, cropvalue) <= crop_fraction_allowed and \\\r\n frac_eq_to(cover_p, True) <= overlap_allowed:\r\n # Accept the patch.\r\n 
patches.append(patch)\r\n regions.append(region)\r\n # Mask the area.\r\n covered[region] = True\r\n\r\n # Jump ahead in the x direction.\r\n colstart += jump_cols\r\n got_a_patch_this_row = True\r\n # print \"Got a patch at %d, %d\" % (rowstart, colstart)\r\n else:\r\n # Otherwise, shift window across by one pixel.\r\n colstart += 1\r\n\r\n if got_a_patch_this_row:\r\n # Jump ahead in the y direction.\r\n rowstart += jump_rows\r\n else:\r\n # Otherwise, shift the window down by one pixel.\r\n rowstart += 1\r\n\r\n # Return a 3D array of the patches with the patch index as the first\r\n # dimension (so that patch pixels stay contiguous in memory, in a\r\n # C-ordered array).\r\n return np.concatenate([pat[np.newaxis, ...] for pat in patches], axis=0),regions", "def get_identical_patches(imgs, patch_size):\n ih, iw = imgs[0].shape[:2]\n tp = patch_size\n ix = np.random.randint(0, iw - patch_size)\n iy = np.random.randint(0, ih - patch_size)\n imgs = []\n for i in range(len(imgs)):\n imgs.append(imgs[i][iy:iy + tp, ix:ix + tp, :])\n return imgs", "def make_vector_patches(data, nbatches, batch_size, field_size):\n patches = np.zeros((nbatches, batch_size, field_size ** 2))\n for i in xrange(nbatches):\n xs = np.zeros((batch_size, field_size ** 2))\n for j in xrange(batch_size):\n RAND_im = np.random.randint(data['images'].shape[0])\n RAND_x = np.random.randint(data['images'].shape[1] - field_size)\n RAND_y = np.random.randint(data['images'].shape[2] - field_size)\n xs[j, :] = np.reshape(data['images'][RAND_im, RAND_x:RAND_x\n + field_size, RAND_y:RAND_y + field_size, :],\n (field_size ** 2,))\n patches[i] = xs\n return patches", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no 
god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def patch(imgs, big):\r\n bg = Background(big.size[1], big.size[0], size=imgs[0].size)\r\n a = np.asarray(big, dtype=np.float64)\r\n means = [np.asarray(img, dtype=np.float64).mean(axis=(0,1)) for img in imgs if img.mode=='RGB']\r\n for i in range(big.size[1]):\r\n for j in range(big.size[0]):\r\n p = a[i, j, :]\r\n k = np.argmin([LA.norm(p-m) for m in means])\r\n bg.paste(imgs[k], (j, i))\r\n return bg.image", "def apply_mask(components):\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components", "def patch(self, patch_center, patch_size, expand_patch=True):\n # x-axis corresponds to the columns of the image\n # y-axis corresponds to the rows of the image\n padding_x = int(patch_size[1]/2)\n padding_y = int(patch_size[0]/2)\n\n min_x = patch_center[0, 0] - padding_x\n max_x = patch_center[0, 0] + padding_x + patch_size[1] % 2\n min_y = patch_center[1, 0] - padding_y\n max_y = patch_center[1, 0] + padding_y + patch_size[0] % 2\n\n # Initialize the patch with 0.0\n patch = np.zeros(patch_size + self._image.shape[2:], dtype=np.float32)\n\n # Save some space by creating local copies with single letter names\n h, w = self.height, self.width\n\n # If the patch is inside the image boundaries return it as it is\n if min_x >= 0 and min_y >= 0 and max_x <= w and max_y <= h:\n patch[:, :] = self._image[min_y:max_y, min_x:max_x]\n\n # otherwise copy part (or nothing) from the image into the empty patch\n elif expand_patch:\n p_min_x = min(w, max(0, min_x))\n p_max_x = max(0, min(w, max_x))\n p_min_y = min(h, max(0, min_y))\n p_max_y = max(0, min(h, max_y))\n\n s_min_x = min(patch_size[1], max(0, 0 - min_x))\n s_max_x = max(0, min(patch_size[1], patch_size[1] + w - max_x))\n s_min_y = min(patch_size[0], max(0, 0 - min_y))\n s_max_y = max(0, min(patch_size[0], patch_size[0] + h - max_y))\n\n patch[s_min_y:s_max_y, s_min_x:s_max_x] = \\\n self._image[p_min_y:p_max_y, p_min_x:p_max_x]\n else:\n patch.fill(-1.)\n\n return patch", "def prepare_data(labels, classes, patch_shape):\n locations = [set() for _ in range(len(classes) - 1)]\n w, h, d, _ = patch_shape\n bounds = np.array(labels.shape) - np.array((w, h, d))\n for x, y, z in product(range(0, bounds[0], 3), range(0, bounds[1], 3), range(0, bounds[2], 1)):\n mx, my, mz = x + w // 2, y + h // 2, z + d // 2\n if labels[mx, my, mz] == 0: continue\n #Get data, and compute active voxel ratio\n data = labels[x:x + w, y:y + h, z:z + d]\n active = np.mean(data == labels[mx, my, mz])\n #Bin or resample\n index, = np.searchsorted(classes, [active], side = 'left')\n locations[index - 1].add((x, y, z))\n return equalize(locations)", "def eval_on_images(self, shading_image_arr, pixel_labels_dir, thres_list, 
photo_id, bl_filter_size, mode):\n\n shading_image_grayscale = shading_image_arr\n shading_image_grayscale[shading_image_grayscale < 1e-4] = 1e-4\n shading_image_grayscale = np.log(shading_image_grayscale)\n\n shading_gradmag = saw_utils.compute_gradmag(shading_image_grayscale)\n shading_gradmag = np.abs(shading_gradmag)\n\n if bl_filter_size:\n shading_gradmag_max = maximum_filter(shading_gradmag, size=bl_filter_size)\n\n\n # We have the following ground truth labels:\n # (0) normal/depth discontinuity non-smooth shading (NS-ND)\n # (1) shadow boundary non-smooth shading (NS-SB)\n # (2) smooth shading (S)\n # (100) no data, ignored\n y_true = saw_utils.load_pixel_labels(pixel_labels_dir=pixel_labels_dir, photo_id=photo_id)\n\n # Add-------------------------------------\n\n # diffuclut and harder dataset\n srgb_img = saw_utils.load_img_arr(photo_id)\n srgb_img = np.mean(srgb_img, axis = 2)\n img_gradmag = saw_utils.compute_gradmag(srgb_img)\n\n smooth_mask = (y_true == 2)\n average_gradient = np.zeros_like(img_gradmag)\n # find every connected component\n labeled_array, num_features = label(smooth_mask)\n for j in range(1, num_features+1):\n # for each connected component, compute the average image graident for the region\n avg = np.mean(img_gradmag[labeled_array == j])\n average_gradient[labeled_array == j] = avg\n\n average_gradient = np.ravel(average_gradient)\n # Add-------------------------------------\n \n y_true = np.ravel(y_true)\n ignored_mask = y_true == 100\n\n # If we don't have labels for this photo (so everything is ignored), return\n # None\n if np.all(ignored_mask):\n print(\"no labels\")\n return [None] * len(thres_list)\n\n ret = []\n for thres in thres_list:\n y_pred = (shading_gradmag < thres).astype(int)\n y_pred_max = (shading_gradmag_max < thres).astype(int)\n y_pred = np.ravel(y_pred)\n y_pred_max = np.ravel(y_pred_max)\n # Note: y_pred should have the same image resolution as y_true\n assert y_pred.shape == y_true.shape\n\n # confusion_matrix = saw_utils.grouped_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask], y_pred_max[~ignored_mask])\n if mode < 0.1:\n confusion_matrix = saw_utils.grouped_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask], y_pred_max[~ignored_mask])\n else:\n confusion_matrix = saw_utils.grouped_weighted_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask], y_pred_max[~ignored_mask], average_gradient[~ignored_mask])\n\n ret.append(confusion_matrix)\n\n return ret", "def create_patches(self, image):\n images = tf.expand_dims(image, axis=0)\n patches = tf.extract_image_patches(\n images,\n ksizes=[1, self.patch_h, self.patch_w, 1],\n strides=[1, self.strides_rows, self.strides_cols, 1],\n rates=[1, 1, 1, 1],\n padding='VALID',\n name=None\n )\n patches = tf.reshape(\n patches,\n (self.n_rows * self.n_cols, self.patch_h,\n self.patch_w, self.col_channels))\n return patches", "def patches(self, patch_centers, patch_size):\n assert patch_centers.shape[0] > patch_centers.shape[1]\n # x-axis corresponds to the columns of the image\n # y-axis corresponds to the rows of the image\n padding_x = int(patch_size[1]/2)\n padding_y = int(patch_size[0]/2)\n\n min_x = patch_centers[:, 0] - padding_x\n max_x = patch_centers[:, 0] + padding_x + patch_size[1] % 2\n min_y = patch_centers[:, 1] - padding_y\n max_y = patch_centers[:, 1] + padding_y + patch_size[0] % 2\n\n # Save some space by creating local copies with single letter names\n h, w = self.height, self.width\n # Get the patch_centers that are inside the image boundaries\n 
patches_inside_boundaries = np.logical_and(\n np.logical_and(min_x >= 0, min_y >= 0),\n np.logical_and(max_x <= w, max_y <= h)\n )\n\n # If a single patch is outside the boundaries return None to avoid\n # useless computations\n if ~np.all(patches_inside_boundaries):\n return None\n\n # Initialize the patch with 0.0\n N = patch_centers.shape[0]\n patch_shape = (N,) + patch_size + self._image.shape[2:]\n patches = np.ones(patch_shape, dtype=np.float32)\n\n idxs = np.arange(N)\n for pi in idxs[patches_inside_boundaries]:\n patches[pi] = self._image[min_y[pi]:max_y[pi], min_x[pi]:max_x[pi]]\n\n return patches", "def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output", "def _overlay_bubble_means(self, img, means):\n for (i, j) in product(*map(range, self.size)):\n i0, i1, j0, j1 = self.coords[i, j, :]\n img[i0:i1, j0:j1] = means[i, j]\n\n return img", "def set_avg_rgb_group(poly_cstr, lyrsql, updatesql, rast_cstr):\n ds = gdal.Open(rast_cstr)\n georef = ds.GetGeoTransform()\n rgb = ds.ReadAsArray()\n assert rgb.shape[0] == 3\n img_shape = rgb.shape[1:]\n extent = get_extent(georef, img_shape)\n LOG.info(\"Extent: %s\", extent)\n vec_ds, lyr = open(poly_cstr, layersql=lyrsql, extent=extent, open_for_update=True)\n ldefn = lyr.GetLayerDefn()\n int_attr_name = ldefn.GetFieldDefn(0).name\n id_attr_name = ldefn.GetFieldDefn(1).name\n mask = just_burn_layer(lyr, georef, img_shape, attr=int_attr_name, dtype=np.int32, all_touched=False)\n LOG.info(\"Done burning - setting attr in %d features\", lyr.GetFeatureCount())\n n_ok = 0\n for n, feat in enumerate(lyr):\n if n % 100 == 0:\n LOG.info(\"Done: %d, ok: %d\", n, n_ok)\n int_id = feat[int_attr_name]\n group_id = feat[id_attr_name]\n I, J = np.where(mask == int_id)\n if I.size > 0:\n n_ok += 1\n r = 
int(round(np.sqrt(((rgb[0, I, J].astype(np.float64) ** 2).mean()))))\n g = int(round(np.sqrt(((rgb[1, I, J].astype(np.float64) ** 2).mean()))))\n b = int(round(np.sqrt(((rgb[2, I, J].astype(np.float64) ** 2).mean()))))\n if n_ok % 100 == 0:\n LOG.info(\"size: %d, sq-mean was %d, while raw mean red is: %.1f\",\n I.size, r, rgb[0, I, J].astype(np.float64).mean())\n rgb_str = '{},{},{}'.format(r, g, b)\n vec_ds.ExecuteSQL(updatesql.format(rgb_str, group_id))", "def average(img, size=3):\n \n size = int(size)\n kernel = np.ones((size,size)) / float(size**2)\n\n return ndi.convolve(img,kernel)", "def patch_image(image, bboxes=None, offset_height=0, offset_width=0,\n target_height=None, target_width=None):\n # Make this function safe with respect to senseless inputs (i.e\n # having an offset_height that's larger than tf.shape(image)[0], etc.)\n # As of now we only use it inside random_patch, which already makes sure\n # the arguments are legal.\n im_shape = tf.shape(image)\n if target_height is None:\n target_height = (im_shape[0] - offset_height - 1)\n if target_width is None:\n target_width = (im_shape[1] - offset_width - 1)\n\n new_image = tf.image.crop_to_bounding_box(\n image,\n offset_height=offset_height, offset_width=offset_width,\n target_height=target_height, target_width=target_width\n )\n patch_shape = tf.shape(new_image)\n\n # Return if we didn't have bboxes.\n if bboxes is None:\n # Resize the patch to the original image's size. This is to make sure\n # we respect restrictions in image size in the models.\n new_image_resized = tf.image.resize_images(\n new_image, im_shape[:2],\n method=tf.image.ResizeMethod.BILINEAR\n )\n return_dict = {'image': new_image_resized}\n return return_dict\n\n # Now we will remove all bboxes whose centers are not inside the cropped\n # image.\n\n # First get the x and y coordinates of the center of each of the\n # bboxes.\n bboxes_center_x = tf.reduce_mean(\n tf.concat(\n [\n # bboxes[:, 0] gets a Tensor with shape (20,).\n # We do this to get a Tensor with shape (20, 1).\n bboxes[:, 0:1],\n bboxes[:, 2:3]\n ],\n axis=1\n )\n )\n bboxes_center_y = tf.reduce_mean(\n tf.concat(\n [\n bboxes[:, 1:2],\n bboxes[:, 3:4]\n ],\n axis=1\n ),\n axis=1\n )\n\n # Now we get a boolean tensor holding for each of the bboxes' centers\n # wheter they are inside the patch.\n center_x_is_inside = tf.logical_and(\n tf.greater(\n bboxes_center_x,\n offset_width\n ),\n tf.less(\n bboxes_center_x,\n tf.add(target_width, offset_width)\n )\n )\n center_y_is_inside = tf.logical_and(\n tf.greater(\n bboxes_center_y,\n offset_height\n ),\n tf.less(\n bboxes_center_y,\n tf.add(target_height, offset_height)\n )\n )\n center_is_inside = tf.logical_and(\n center_x_is_inside,\n center_y_is_inside\n )\n\n # Now we mask the bboxes, removing all those whose centers are outside\n # the patch.\n masked_bboxes = tf.boolean_mask(bboxes, center_is_inside)\n # We move the bboxes to the right place, clipping them if\n # necessary.\n new_bboxes_unclipped = tf.concat(\n [\n tf.subtract(masked_bboxes[:, 0:1], offset_width),\n tf.subtract(masked_bboxes[:, 1:2], offset_height),\n tf.subtract(masked_bboxes[:, 2:3], offset_width),\n tf.subtract(masked_bboxes[:, 3:4], offset_height),\n ],\n axis=1,\n )\n # Finally, we clip the boxes and add back the labels.\n new_bboxes = tf.concat(\n [\n tf.to_int32(\n clip_boxes(\n new_bboxes_unclipped,\n imshape=patch_shape[:2]\n ),\n ),\n masked_bboxes[:, 4:]\n ],\n axis=1\n )\n # Now resize the image to the original size and adjust bboxes accordingly\n 
new_image_resized = tf.image.resize_images(\n new_image, im_shape[:2],\n method=tf.image.ResizeMethod.BILINEAR\n )\n # adjust_bboxes requires height and width values with dtype=float32\n new_bboxes_resized = adjust_bboxes(\n new_bboxes,\n old_height=tf.to_float(patch_shape[0]),\n old_width=tf.to_float(patch_shape[1]),\n new_height=tf.to_float(im_shape[0]),\n new_width=tf.to_float(im_shape[1])\n )\n\n # Finally, set up the return dict, but only update the image and bboxes if\n # our patch has at least one bbox in it.\n update_condition = tf.greater_equal(\n tf.shape(new_bboxes_resized)[0],\n 1\n )\n return_dict = {}\n return_dict['image'] = tf.cond(\n update_condition,\n lambda: new_image_resized,\n lambda: image\n )\n return_dict['bboxes'] = tf.cond(\n update_condition,\n lambda: new_bboxes_resized,\n lambda: bboxes\n )\n return return_dict", "def dilationPatches2(rawPatches, dilationIter=20, borderWidth=1): # pixel width of the border after dilation\r\n\r\n total_area = ni.binary_dilation(rawPatches, iterations=dilationIter).astype(np.int)\r\n patchBorder = total_area - rawPatches\r\n\r\n # thinning patch borders\r\n patchBorder = sm.skeletonize(patchBorder)\r\n\r\n # thickening patch borders\r\n if borderWidth > 1:\r\n patchBorder = ni.binary_dilation(patchBorder, iterations=borderWidth - 1).astype(np.int)\r\n\r\n # genertating new patches\r\n newPatches = np.multiply(-1 * (patchBorder - 1), total_area)\r\n\r\n # removing small edges\r\n labeledPatches, patchNum = ni.label(newPatches)\r\n\r\n newPatches2 = np.zeros(newPatches.shape, dtype=np.int)\r\n\r\n for i in range(1, patchNum + 1):\r\n currPatch = np.zeros(labeledPatches.shape, dtype=np.int)\r\n currPatch[labeledPatches == i] = 1\r\n currPatch[labeledPatches != i] = 0\r\n\r\n if (np.sum(np.multiply(currPatch, rawPatches)[:]) > 0):\r\n # currPatch = ni.binary_closing(currPatch,\r\n # structure = np.ones((borderWidth+2,borderWidth+2))).astype(np.int)\r\n newPatches2[currPatch == 1] = 1\r\n\r\n return newPatches2", "def _mask_and_avg(values, padding_mask):\n\tdec_lens = torch.sum(padding_mask,dim=1)\n\tlosses = torch.stack(values, dim=1)\n\tlosses = losses * padding_mask\n\tvalues_per_ex = torch.sum(losses, dim=1)/dec_lens\n\treturn torch.sum(values_per_ex)", "def blending_example2():\n pic_earth = read_image(relpath(\"./externals/pic_earth.jpg\"), 2)\n pic_asteroid = read_image(relpath(\"./externals/pic_asteroid.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_asteroid.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n [R1, G1, B1] = np.dsplit(pic_earth, pic_earth.shape[2])\n [R2, G2, B2] = np.dsplit(pic_asteroid, pic_asteroid.shape[2])\n R1 = np.reshape(R1, (1024,1024))\n R2 = np.reshape(R2, (1024,1024))\n G1 = np.reshape(G1, (1024,1024))\n G2 = np.reshape(G2, (1024,1024))\n B1 = np.reshape(B1, (1024,1024))\n B2 = np.reshape(B2, (1024,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_earth)\n 
ax2.imshow(pic_asteroid)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_earth, pic_asteroid, mask, new_pic", "def slice_patches(data, wanted_height: int, wanted_width: int):\n patches = []\n for _ in range(len(data)):\n current_height = data[_].shape[0]\n current_width = data[_].shape[1]\n\n # If patches fit image perfectly, no overflow handling required\n if PATCHES * wanted_height == current_height and PATCHES * wanted_width == current_width:\n fitting_patches_height = PATCHES\n step_size_height = wanted_height\n\n fitting_patches_width = PATCHES\n step_size_width = wanted_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # If patches don't fit along height, y-axis\n elif PATCHES * wanted_height > current_height and PATCHES * wanted_width == current_width:\n fitting_patches_height = PATCHES - 1 # Last patch may not fit with the same step size\n overflow_height = PATCHES * wanted_height - current_height\n overlap_height = overflow_height // fitting_patches_height\n step_size_height = wanted_height - overlap_height\n\n fitting_patches_width = PATCHES\n step_size_width = wanted_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patches, which may not fit with same step size may overlap more along y axis\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width] for i\n in range(-wanted_height, 0)]\n patches.append(patch)\n\n # If patches don't fit along width, x axis\n elif PATCHES * wanted_height == current_height and PATCHES * wanted_width > current_width:\n fitting_patches_height = PATCHES\n step_size_height = wanted_height\n\n fitting_patches_width = PATCHES - 1\n overflow_width = PATCHES * wanted_width - current_width\n overlap_width = overflow_width // fitting_patches_width\n step_size_width = wanted_width - overlap_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patch which may not fit with same step size, overlaps more along x axis\n patch = [data[_][i][-wanted_width:] for i in\n range(nmr_patch_height * step_size_height,\n nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # If patches don't fit along neither height nor width\n elif PATCHES * wanted_height > current_height and PATCHES * wanted_width > current_width:\n fitting_patches_height = PATCHES - 1 # Last patch may not fit with the same step size\n overflow_height = PATCHES * wanted_height - current_height\n overlap_height = overflow_height // fitting_patches_height\n step_size_height = wanted_height - overlap_height\n\n 
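# The width (x-axis) is handled the same way as the height above: the overflow is
# spread as overlap across PATCHES - 1 patches and subtracted from the step size.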
fitting_patches_width = PATCHES - 1\n overflow_width = PATCHES * wanted_width - current_width\n overlap_width = overflow_width // fitting_patches_width\n step_size_width = wanted_width - overlap_width\n\n for nmr_patch_height in range(fitting_patches_height):\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width]\n for i in\n range(nmr_patch_height * step_size_height, nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n # Patch which may not fit with same step size, overlaps more\n patch = [data[_][i][-wanted_width:] for i in\n range(nmr_patch_height * step_size_height,\n nmr_patch_height * step_size_height + wanted_height)]\n patches.append(patch)\n\n for nmr_patch_width in range(fitting_patches_width):\n patch = [\n data[_][i][nmr_patch_width * step_size_width:nmr_patch_width * step_size_width + wanted_width] for i\n in range(-wanted_height, 0)]\n patches.append(patch)\n\n patch = [data[_][i][-wanted_width:] for i in\n range(-wanted_height, 0)] # Last patch which may not fit neither height nor width\n patches.append(patch)\n\n return np.array(patches)", "def get_patches(image_mat, stride):\n window_shape = (128, 128, 3)\n windows = view_as_windows(image_mat, window_shape, step=stride)\n patches = []\n for m in range(windows.shape[0]):\n for n in range(windows.shape[1]):\n patches += [windows[m][n][0]]\n return patches", "def convertCluttered(original_images, finalImgSize, initImgSize=28, number_patches=4, clutter_size=8, batch_size=None):\n\n images, imgCoord = convertTranslated(original_images, batch_size=batch_size, initImgSize=initImgSize, finalImgSize=finalImgSize)\n if batch_size is None:\n batch_size = len(images)\n size_diff = finalImgSize - clutter_size\n clutter_size_diff = initImgSize - clutter_size/2\n cluttered_images = np.zeros([batch_size, finalImgSize*finalImgSize])\n\n for k in range(batch_size):\n image = images[k, :]\n image = np.reshape(image, (finalImgSize, finalImgSize))\n cluttered_image = image\n for l in range(number_patches):\n\n original_image = original_images[random.randint(0, batch_size-1), :]\n original_image = np.reshape(original_image, (initImgSize, initImgSize))\n\n # generate and save random coordinates\n clutterX = random.randint(clutter_size/2, clutter_size_diff)\n clutterY = random.randint(clutter_size/2, clutter_size_diff)\n diff = np.int(clutter_size/2)\n clutter = original_image[clutterX-diff: clutterX+diff, clutterY-diff: clutterY+diff]\n # generate and save random coordinates\n randX = random.randint(0, size_diff)\n randY = random.randint(0, size_diff)\n # padding\n clutter = np.lib.pad(clutter, ((randX, size_diff - randX), (randY, size_diff - randY)), 'constant', constant_values = (0))\n cluttered_image = np.clip(cluttered_image + clutter, a_min=0, a_max=1)\n cluttered_images[k, :] = np.reshape(cluttered_image, (finalImgSize*finalImgSize))\n\n return cluttered_images, imgCoord", "def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] 
> 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near 
boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)", "def _produce_individual_star_masks(self, dilationWidth=4):\n # TODO: REWRITE THIS METHOD USING THE ASTROPY SEGMENTATION METHODS???\n # Yes, I THINK so...\n\n # Grab binning\n binX, binY = self.imageList[0].binning\n\n # Compute kernel shape\n medianKernShape = (np.int(np.ceil(9.0/binX)), np.int(np.ceil(9.0/binY)))\n\n # Grab the number of images (for user updates)\n numImg = self.numberOfImages\n\n # Construct a blank array to populate with masks\n starMasks = np.zeros(self.shape, dtype=int)\n\n # Loop through the images and compute individual star masks\n for imgNum, img in enumerate(self.imageList):\n print('Building star mask for image {0:g} of {1:g}'.format(imgNum + 1, numImg), end='\\r')\n # Grab the image array\n thisData = img.data.copy()\n\n # Replace bad values with zeros\n badInds = np.where(np.logical_not(np.isfinite(thisData)))\n thisData[badInds] = -1e6\n\n # Filter the image\n medImg = ndimage.median_filter(thisData, size = medianKernShape)\n\n # get stddev of image background\n mean, median, stddev = img.sigma_clipped_stats()\n\n # Look for deviates from the filter (positive values only)\n # starMask1 = np.logical_and(np.abs(thisData - medImg) > 2.0*stddev,\n # thisData > 0)\n starMask1 = (np.abs(thisData - medImg) > 2.0*stddev)\n\n # Use the scipy ndimage opening and closing to clean the mask\n starMask1 = ndimage.binary_opening(starMask1)\n starMask1 = ndimage.binary_closing(starMask1)\n\n # Clean out some edge effects.\n starMask1[:, -4:-1] = 0\n\n #\n # NOTE: This doesn't work when there are nebulae and galaxies in the image!\n #\n # starMask1 = make_source_mask(\n # thisData,\n # snr=2,\n # npixels=5,\n # dilate_size=11,\n # mask_value=-1e6\n # )\n\n # Try using guassian kernel convolution instead\n from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel\n\n # Initalize a dilatingKernel\n gaussian_2D_kernel = Gaussian2DKernel(10.0)\n\n # Normalize the kernel\n gaussian_2D_kernel.normalize()\n\n # If the dialation kernel is larger than 10 pixels, then use FFT\n # convolution.\n starMask11 = convolve_fft(\n starMask1.astype(float),\n gaussian_2D_kernel\n )\n\n # Mask any pixels with values greater than 0.04 (which seems to\n # produce a reasonable result.)\n peakValue = 
1/(200*np.pi)\n maskThreshold = 10 * peakValue * np.exp(-0.5*((dilationWidth+0.5)/10.0)**2)\n\n starMask1 = (starMask11 > maskThreshold).astype(np.int8)\n\n # TODO: delete this code if convolution works out\n #\n # # Finally, liberally EXPAND the mask with four dilations\n # starMask1 = ndimage.binary_dilation(\n # starMask1,\n # iterations=starMaskIters\n # ).astype(np.int8)\n\n # TODO: delete this code once I verify everything is working\n #\n # # Count the number of masked neighbors for each pixel\n # neighborCount = np.zeros(thisData.shape, dtype=int)\n # for dx in range(-1,2,1):\n # for dy in range(-1,2,1):\n # neighborCount += np.roll(np.roll(starMask1, dy, axis=0),\n # dx, axis=1).astype(np.int8)\n #\n # # Find pixels with more than two masked neighbor (including self)\n # # starMask1 = np.logical_and(starMask1, neighborCount > 2)\n # starMask1 = (neighborCount > 2).astype(np.int8)\n\n # Place the final mask into its respective slice of the 3D array\n starMasks[imgNum, :, :] = starMask1\n\n # Print a newline character to preserve star mask updates\n print('')\n\n # Once ALL of the star masks have been computed, return them to the user\n return starMasks", "def _post_process_masks_pt(\n self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None\n ):\n requires_backends(self, [\"torch\"])\n pad_size = self.pad_size if pad_size is None else pad_size\n target_image_size = (pad_size[\"height\"], pad_size[\"width\"])\n if isinstance(original_sizes, (torch.Tensor, np.ndarray)):\n original_sizes = original_sizes.tolist()\n if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):\n reshaped_input_sizes = reshaped_input_sizes.tolist()\n output_masks = []\n for i, original_size in enumerate(original_sizes):\n if isinstance(masks[i], np.ndarray):\n masks[i] = torch.from_numpy(masks[i])\n elif not isinstance(masks[i], torch.Tensor):\n raise ValueError(\"Input masks should be a list of `torch.tensors` or a list of `np.ndarray`\")\n interpolated_mask = F.interpolate(masks[i], target_image_size, mode=\"bilinear\", align_corners=False)\n interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]\n interpolated_mask = F.interpolate(interpolated_mask, original_size, mode=\"bilinear\", align_corners=False)\n if binarize:\n interpolated_mask = interpolated_mask > mask_threshold\n output_masks.append(interpolated_mask)\n\n return output_masks", "def cut_image_strided(image, new_size):\n bands = image.shape[0]\n new_size_y, new_size_x = new_size\n old_size_y = image.shape[1]\n old_size_x = image.shape[2]\n nr_images_x = old_size_x // new_size[1]\n nr_images_y = old_size_y // new_size[0]\n if old_size_x % new_size_x != 0 or old_size_y % new_size_y != 0:\n print(\"The patch size is not a full multiple of the complete patch size\")\n\n return as_strided(image, shape=(nr_images_y, nr_images_x, bands, new_size_y, new_size_x),\n strides=(image.strides[1] * new_size_y, image.strides[2] * new_size_x, image.strides[0],\n image.strides[1], image.strides[2]))", "def split_valid_sampling(inpath,\n patch_size, \n train_prop,\n val_prop,\n outpath,\n padding_mode='constant', \n padding_values=0, \n ignore_labels=[0]):\n outdir = outpath.joinpath('valid_sampling')\n outpath = outdir.joinpath(f'{patch_size}x{patch_size}_{padding_mode}_{train_prop:.2f}_{val_prop:.2f}.h5')\n \n if outpath.is_file():\n warnings.warn('Sampled data already exist, remove directory'\n ' \"{}\" to resample data!'.format(outpath))\n return outpath\n\n 
outdir.mkdir(parents=True, exist_ok=True)\n \n with h5py.File(inpath, 'r') as in_file, h5py.File(outpath, 'w') as out_file:\n out_file.attrs.update(in_file.attrs) # copy attributes\n patchgroup = out_file.create_group('patches')\n\n data, labels = in_file['data'], in_file['labels']\n\n\n # split image into subimages of size patch_size x patch_size\n h, w, _ = data.shape\n num_subimg_h = ceil(h/patch_size) # patches along vertical axis\n num_subimg_w = ceil(w/patch_size) # patches along horizontal axis\n\n subimgs = []\n subimg_labels = []\n\n for i in range(num_subimg_h):\n for j in range(num_subimg_w):\n start_idx_h = i*patch_size\n start_idx_w = j*patch_size\n end_idx_h = (i+1)*patch_size\n end_idx_w = (j+1)*patch_size\n\n # end_idx_h and end_idx_w may be greater than height and width of data array\n if end_idx_h > h:\n end_idx_h = h\n if end_idx_w > w:\n end_idx_w = w\n\n subimgs.append(data[start_idx_h:end_idx_h, start_idx_w:end_idx_w])\n subimg_labels.append(labels[start_idx_h:end_idx_h, start_idx_w:end_idx_w])\n\n # shuffle samples\n samples = list(zip(subimgs, subimg_labels))\n np.random.shuffle(samples)\n subimgs, subimg_labels = zip(*samples)\n\n # count how many pixels have non 'ignore_labels' and use result to assign approximately\n # train_prop share of non zero data to train set, val_prop of non zero data to validation set\n # and (1-(train_prop+val_prop)) to test set.\n if ignore_labels:\n cum_nonzero_labels = np.cumsum(\n [np.invert(np.isin(lbls, ignore_labels)).sum() for lbls in subimg_labels])\n split_idx_train = 0\n split_idx_val = 0\n if cum_nonzero_labels[-1] == 0:\n raise RuntimeError('Labelimage only contains ignored labels.')\n while(True):\n if (cum_nonzero_labels[split_idx_train]/cum_nonzero_labels[-1]) < train_prop:\n split_idx_train += 1\n if (cum_nonzero_labels[split_idx_val]/cum_nonzero_labels[-1]) < (train_prop + val_prop):\n split_idx_val += 1\n else:\n break\n print(f'{cum_nonzero_labels[split_idx_train]} / {cum_nonzero_labels[-1]}')\n print(f'{cum_nonzero_labels[split_idx_val]} / {cum_nonzero_labels[-1]}')\n else :\n split_idx_train = int(len(subimgs)*train_prop)\n split_idx_val = int(len(subimgs)*(train_prop + val_prop))\n\n # sample test and training data patches\n train_subimgs = subimgs[:split_idx_train]\n train_subimg_labels = subimg_labels[:split_idx_train]\n val_subimgs = subimgs[split_idx_train:split_idx_val]\n val_subimg_labels = subimg_labels[split_idx_train:split_idx_val]\n test_subimgs = subimgs[split_idx_val:]\n test_subimg_labels = subimg_labels[split_idx_val:]\n train_samplecount = _sample_patches(train_subimgs, train_subimg_labels, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels)\n val_samplecount = _sample_patches(val_subimgs, val_subimg_labels,\n patch_size,\n patchgroup,\n padding_mode,\n padding_values,\n ignore_labels,\n startidx=train_samplecount)\n test_samplecount = _sample_patches(test_subimgs, test_subimg_labels, \n patch_size, \n patchgroup, \n padding_mode, \n padding_values, \n ignore_labels,\n startidx=(train_samplecount+val_samplecount))\n\n train_samples = np.arange(train_samplecount)\n val_samples = np.arange(train_samplecount, train_samplecount+val_samplecount)\n test_samples = np.arange((train_samplecount+val_samplecount), \n (train_samplecount+val_samplecount+test_samplecount))\n\n out_file.create_dataset('trainsample_list', data=train_samples)\n out_file.create_dataset('valsample_list', data=val_samples)\n out_file.create_dataset('testsample_list', data=test_samples)\n 
out_file.attrs['train_prop'] = train_prop\n out_file.attrs['val_prop'] = val_prop\n\n return outpath", "def cal_patches_norm(self):\n # norm of style image patches\n norm_array = torch.zeros(self.style_patches.shape[0])\n for i in range(self.style_patches.shape[0]):\n norm_array[i] = torch.pow(torch.sum(torch.pow(self.style_patches[i], 2)), 0.5)\n return norm_array.to(self.device)", "def extract_patches_tumor(self, bounding_boxes):\n mag_factor = pow(2, self.level_used)\n\n print('No. of ROIs to extract patches from: %d' % len(bounding_boxes))\n\n for i, bounding_box in enumerate(bounding_boxes):\n b_x_start = int(bounding_box[0]) * mag_factor\n b_y_start = int(bounding_box[1]) * mag_factor\n b_x_end = (int(bounding_box[0]) + int(bounding_box[2])) * mag_factor\n b_y_end = (int(bounding_box[1]) + int(bounding_box[3])) * mag_factor\n# X = np.random.random_integers(b_x_start, high=b_x_end, size=500)\n# Y = np.random.random_integers(b_y_start, high=b_y_end, size=500)\n # X = np.arange(b_x_start, b_x_end-256, 5)\n # Y = np.arange(b_y_start, b_y_end-256, 5)\n\n for x in range(b_x_start,b_x_end,PATCH_SIZE):\n for y in range(b_y_start,b_y_end,PATCH_SIZE):\n patch = self.wsi_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))\n mask = self.mask_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))\n mask_gt = np.array(mask)\n # mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)\n mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)\n patch_array = np.array(patch)\n \n white_pixel_cnt_gt = cv2.countNonZero(mask_gt)\n \n if white_pixel_cnt_gt == 0: # mask_gt does not contain tumor area\n patch_hsv = cv2.cvtColor(patch_array, cv2.COLOR_BGR2HSV)\n lower_red = np.array([20, 20, 20])\n upper_red = np.array([200, 200, 200])\n mask_patch = cv2.inRange(patch_hsv, lower_red, upper_red)\n white_pixel_cnt = cv2.countNonZero(mask_patch)\n \n if white_pixel_cnt > ((PATCH_SIZE * PATCH_SIZE) * 0.50):\n # mask = Image.fromarray(mask)\n patch.save(PROCESSED_PATCHES_TUMOR_NEGATIVE_PATH + PATCH_NORMAL_PREFIX+'_'+str(x)+'_'+str(y)+'.jpg', 'JPEG')\n # mask.save(PROCESSED_PATCHES_NORMAL_PATH + PATCH_NORMAL_PREFIX + str(self.patch_index),\n # 'PNG')\n self.negative_patch_index += 1\n else: # mask_gt contains tumor area\n if white_pixel_cnt_gt >= ((PATCH_SIZE * PATCH_SIZE) * 0.85):\n patch.save(PROCESSED_PATCHES_POSITIVE_PATH + PATCH_TUMOR_PREFIX +'_'+str(x)+'_'+str(y)+'.jpg', 'JPEG')\n self.positive_patch_index += 1\n \n patch.close()\n mask.close()", "def __call__(self, input_patch):\n input_patch = self._reshape_patch_to_5d(input_patch)\n\n output_patch = input_patch.astype(np.float32)\n #if np.issubdtype(patch.dtype, np.integer):\n # # normalize to 0-1 value range\n # output /= np.iinfo(patch.dtype).max\n\n output_patch = self._crop_output_patch(output_patch)\n \n # mask should be done in patch engine now\n output_patch *= self.output_patch_mask_numpy\n \n if self.num_output_channels > 1:\n output_patch = np.repeat(output_patch, \n self.num_output_channels, axis=1)\n\n return output_patch", "def regrid(self, *args, **kwargs):\n return _image.image_regrid(self, *args, **kwargs)", "def get_output_from_patches(patches_list, output_shape):\n\n if output_shape[0] == training_size:\n nb_matrix_by_row = 2\n else:\n nb_matrix_by_row = 3\n reconstructed_images = []\n nb_elem_by_patch = nb_matrix_by_row ** 2\n for i in range(patches_list.shape[0] // nb_elem_by_patch):\n reconstructed_image = unpatchify(\n patches_list[i * nb_elem_by_patch: (i + 1) * nb_elem_by_patch].reshape(nb_matrix_by_row, nb_matrix_by_row,\n 
img_patch_size, img_patch_size),\n output_shape)\n reconstructed_images.extend(reconstructed_image)\n\n return reconstructed_images", "def collect_patches_and_dict(\n data_path=None,\n patch_size=8,\n num_atoms=128,\n num_patches_train=10000,\n train_val_test_split=[0.8, 0.1, 0.1],\n out_path=None,\n remove_mean=True,\n):\n parent_dir = dirname(dirname(abspath(__file__)))\n if out_path is None:\n out_path = parent_dir + \"/adaptive_ista/data\"\n out_file_name = (\n out_path\n + \"/data_\"\n + str(patch_size)\n + \"x\"\n + str(patch_size)\n + \"_N_100000\"\n + \"_atoms_\"\n + str(num_atoms)\n )\n # Load the data\n npzfile = np.load(out_file_name + \".npz\", allow_pickle=True)\n y = npzfile[\"y\"].item()\n D = npzfile[\"D\"]\n avg_mean = npzfile[\"avg_mean\"]\n avg_std = npzfile[\"avg_std\"]\n return y, D, avg_mean, avg_std", "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def _apply_patches(self) -> None:\n first_patch, last_patch = False, False\n patches = {}\n for patch_name, locations in self.patches.items():\n residues = []\n for location in 
locations:\n if location == \"FIRST\":\n location = 0\n first_patch = True\n elif location == \"LAST\":\n location = -1\n last_patch = True\n residue = self.residues[location]\n residues.append(residue)\n patches[patch_name] = residues\n if not first_patch:\n first_residue = self.residues[0]\n first_patch = first_residue.first\n patches[first_patch] = [first_residue]\n if not last_patch:\n last_residue = self.residues[-1]\n last_patch = last_residue.last\n patches[last_patch] = [last_residue]\n\n for patch_name, residues in patches.items():\n if patch_name == \"NONE\":\n continue\n patch = self.topology.patches[patch_name]\n self.topology_files.add(patch.rtf_file_name)\n patch.apply(*residues)" ]
[ "0.75404114", "0.6949689", "0.6838472", "0.68046606", "0.67437595", "0.66327584", "0.6593758", "0.6556427", "0.6351604", "0.6174992", "0.6122615", "0.60943955", "0.6007776", "0.5980942", "0.59666556", "0.59625", "0.5957962", "0.5952175", "0.59199303", "0.58981425", "0.5874043", "0.5867192", "0.5865334", "0.58637655", "0.58616894", "0.5833902", "0.58307046", "0.58137745", "0.5813036", "0.57659864", "0.5753119", "0.5738533", "0.5737779", "0.56980795", "0.56915116", "0.56823486", "0.5672781", "0.5668327", "0.56570256", "0.56263953", "0.560356", "0.559913", "0.5596066", "0.5578012", "0.5574426", "0.5568904", "0.55644083", "0.5564402", "0.5538626", "0.5518626", "0.55041766", "0.5483403", "0.5481523", "0.5477929", "0.54762936", "0.5475947", "0.54753345", "0.5461379", "0.5440412", "0.54396766", "0.5434246", "0.5418749", "0.5399094", "0.5395193", "0.5376091", "0.53748554", "0.53710806", "0.53691304", "0.53586584", "0.5356873", "0.53375405", "0.5326316", "0.5320845", "0.5305814", "0.5299939", "0.52977705", "0.52908295", "0.52800757", "0.52797437", "0.5276086", "0.5270519", "0.52644473", "0.5263523", "0.5252688", "0.52525544", "0.5248158", "0.52447426", "0.52400476", "0.5236213", "0.5233038", "0.52290624", "0.52276105", "0.52117395", "0.521149", "0.5206149", "0.520263", "0.52003783", "0.51995534", "0.51929396", "0.5181277" ]
0.64738786
8
This is the base "correct" case (they have enough money, are bidders, haven't passed, etc.).
def testInitializeMove(self):
        bid_move = self._move()
        context = self._context()

        bfpc = BiddingForPrivateCompany()
        self.assertTrue(bfpc.run(bid_move, context), bfpc.errors())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def check_for_offer(self, bid, commodity, limit, actual, quantity, price):\n if bid:\n if len(self.trades[\"buys\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"buys\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"buys\"][commodity])\n\n # if total < limit:\n # #PLACE MORE BIDS.\n return total\n\n else:\n if len(self.trades[\"asks\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"asks\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"asks\"][commodity])\n #\n # if total < limit:\n # #PLACE MORE asks.\n # return total\n # if total < limit:\n # #PLACE MORE asks.\n return total # - limit", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. Money refunded.\")\n return False\n else:\n return True", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def test_has_enough_money_handles_insufficient_funds(self):\n # Params\n f_money_collected = 2.00\n f_chocolate_price = 2.25\n\n # Returns\n return_1 = 'Insufficient funds... 
Dispensing coins inserted.\\n'\n\n # Calls\n string_1 = has_enough_money(f_money_collected, f_chocolate_price)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def withdraw(amt) :\r\n global bal \r\n bal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\t\"\"\"{1.OK amt >= 0\tpremise\r\n\t\t2.OK bal >= 0\tpremise\r\n\t\t3.OK bal == bal_in\tpremise\r\n\t}\"\"\"\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (bal == bal_in)\r\n\tif amt <= bal:\r\n\t\t#PREMISES FOR THEN-ARM: \r\n\t\t# (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK amt <= bal\tpremise\r\n\t\t\t4.OK bal == bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tbal = bal - amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (bal == (bal_old - amt))\r\n\t\t# (bal_old == bal_in)\r\n\t\t\"\"\"{1.OK bal == bal_old - amt\tpremise\r\n\t\t\t2.OK amt <= bal_old\talgebra 1\r\n\t\t\t3.OK amt >= 0\talgebra 1\r\n\t\t\t4.OK bal_old >= 0\talgebra 1\r\n\t\t\t5.OK bal_old == bal_in\tpremise\r\n\t\t\t6.OK amt == bal_in - bal\talgebra 1 5\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (amt == (bal_in - bal))\r\n\t\tcash = amt\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == amt)\r\n\t\t# (amt == (bal_in - bal))\r\n\t\t\"\"\"{1.OK amt == bal_in - bal\tpremise\r\n\t\t\t2.OK cash == amt\tpremise\r\n\t\t\t3.OK cash == bal_in - bal\t\tsubst 2 1\r\n\t\t\t4.OK bal >= 0\talgebra 1\r\n\t\t\t5.OK bal_in == bal + cash\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal_in == (bal + cash))\r\n\telse :\r\n\t\t#PREMISES FOR ELSE-ARM: \r\n\t\t# not (amt <= bal)\r\n\t\t# (bal == bal_in)\r\n\t\t\"\"\"{1.OK not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal == bal_in\tpremise\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal == bal_in)\r\n\t\tcash = 0\r\n\t\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t\t# (cash == 0)\r\n\t\t# (bal == bal_in)\r\n\t\tassert not (amt <= bal) # UNABLE TO VERIFY\r\n\t\t\"\"\"{1.OK cash == 0\tpremise\r\n\t\t\t2.OK bal == bal_in\tpremise\r\n\t\t\t3.?? 
not(amt <= bal)\tpremise\r\n\t\t\t4.OK bal >= 0\talgebra 3\r\n\t\t}\"\"\"\r\n\t\t#PREMISES FOR NEXT LINE: \r\n\t\t# (bal >= 0)\r\n# ERROR: uneven indentation of commands\r\n # prove here that bal >= 0 and bal + cash == bal_in\r\n return cash\r\n #PREMISES FOR NEXT LINE: \r\n # (bal >= 0)\r\n # ((bal + cash) == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# ((bal_in == (bal + cash)) or ((bal >= 0) and ((bal + cash) == bal_in)))\r\n\tassert (bal >= 0) # UNABLE TO VERIFY\r", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def test_return_goal_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n user_goal = \"impossible\"\n self.assertEqual(return_goal, user_goal)", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "async def process_bj_game(self, ctx, amount, user_id):\n if amount >= 0:\n if not await self.check_in_game(user_id, ctx):\n if amount > await ex.u_currency.get_balance(user_id):\n await ctx.send(f\"> **{ctx.author}, you can not bet more than your current balance.**\")\n else:\n return True\n else:\n await ctx.send(f\"> **{ctx.author}, you can not bet a negative number.**\")", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def test_reasonable_auction(self):\n # I have no preferences\n bids = [Cost(ITEM1, ACTOR1, 1000),\n Cost(ITEM2, ACTOR1, 1000),\n Cost(ITEM3, ACTOR1, 1000),\n Cost(ITEM4, ACTOR1, 1000),\n Cost(ITEM5, ACTOR1, 1000),\n\n # I have linear preferences\n Cost(ITEM1, ACTOR2, 700),\n Cost(ITEM2, ACTOR2, 800),\n Cost(ITEM3, ACTOR2, 1000),\n Cost(ITEM4, ACTOR2, 1200),\n Cost(ITEM5, ACTOR2, 1300),\n\n # I have non-linear preferences\n Cost(ITEM1, ACTOR3, 400),\n Cost(ITEM2, ACTOR3, 800),\n Cost(ITEM3, ACTOR3, 1000),\n Cost(ITEM4, ACTOR3, 1200),\n Cost(ITEM5, ACTOR3, 1600),\n\n # I have arbitrary preference\n Cost(ITEM1, ACTOR4, 2435),\n Cost(ITEM2, ACTOR4, 305),\n Cost(ITEM3, ACTOR4, 310),\n Cost(ITEM4, ACTOR4, 1725),\n Cost(ITEM5, ACTOR4, 225),\n\n # I have strong preferences\n Cost(ITEM1, ACTOR5, 0),\n Cost(ITEM2, ACTOR5, 0),\n Cost(ITEM3, ACTOR5, 0),\n Cost(ITEM4, ACTOR5, 0),\n Cost(ITEM5, ACTOR5, 5000)]\n result = self.splitter.split(ITEMS[:5], ACTORS[:5], bids)\n expected = [(ITEM1, ACTOR4, None),\n (ITEM2, ACTOR1, None),\n (ITEM3, ACTOR3, None),\n (ITEM4, ACTOR2, None),\n (ITEM5, ACTOR5, None)]\n item_assignments_present(self, result, expected)", "def pay(drink):\n qtr_pay = int(input(\"How many quarters? 
\"))\n dime_pay = int(input(\"How many dimes? \"))\n nickel_pay = int(input(\" How many nickels? \"))\n penny_pay = int(input(\"How many pennies? \"))\n payment_given = (qtr_pay * qtr) + (dime_pay * dime) + (nickel_pay * nickel) + (penny_pay * penny)\n print(payment_given)\n if payment_given >= drink['cost']:\n balance = payment_given - drink['cost']\n print(f\"Here is your USD{balance} back. \")\n print(f\"Here is your {drink_choice} == Enjoy\")\n make_drink(drink)\n elif payment_given < drink['cost']:\n print(\"Sorry that's not enough\")", "def check_cap(org, amount):\n from django.db.models import Sum, Q\n\n if amount < 0:\n query = Q(favor__lt=0)\n else:\n query = Q(favor__gt=0)\n total = abs(\n org.reputations.filter(query).aggregate(sum=Sum(\"favor\"))[\"sum\"] or 0\n ) + abs(amount)\n mod = org.social_modifier * 5\n if total > mod:\n noun = \"favor\" if amount > 0 else \"disfavor\"\n raise CommandError(\n \"That would bring your total %s to %s, and you can only spend %s.\"\n % (noun, total, mod)\n )", "def test_e2e_order_book_amount_less_than_max_bal(self):\n\n cli = \"--balance 1 offline --test -ob test_data/order_books.csv\"\n deal = self._run_bot_offine(cli)\n\n self.assertAlmostEqual(0.06000734789047485, float(deal.data_row[\"start-qty\"]), 4)\n self.assertEqual(0.002407822109525136, float(deal.data_row[\"result-fact-diff\"]))\n\n # prices from order book\n self.assertNotEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))", "def is_number_correct(total):\n if int(total) < 0:\n return None\n return True", "def transaction_successful(drink_type):\r\n total = 0\r\n cost = MENU[drink_type][\"cost\"]\r\n print(f\" A {drink_type} costs ${MENU[drink_type]['cost']}\")\r\n total += float(input(\" How many quarters? \")) * 0.25\r\n total += float(input(\" How many dimes? \")) * 0.10\r\n total += float(input(\" How many nickels? \")) * 0.05\r\n total += float(input(\" How many pennies? \")) * 0.01\r\n\r\n if total >= cost:\r\n print(f\"Here is ${total - cost} in change.\")\r\n return True\r\n else:\r\n print(\"Sorry that's not enough money. 
Money refunded.\")\r\n return False", "def options_to_withdraw(self, amount):\n counter = PaperMoneyCounter() # aux class\n options = [] # options to withdraw\n remaining_cash = 0 # aux var\n\n if (amount % 20 == 0 or amount % 50 == 0) and (amount <= 1000): # is it allowed to withdraw?\n # prioritizing 100-dollar bills\n qtt_100s = counter.how_many_100s(amount)\n remaining_cash = counter.remaining_cash_without_100s(amount)\n\n qtt_50s = counter.how_many_50s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_50s(remaining_cash)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 50-dollar bills\n qtt_100s = 0\n\n qtt_50s = counter.how_many_50s(amount)\n remaining_cash = counter.remaining_cash_without_50s(amount)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 20-dollar bills\n qtt_100s = 0\n\n qtt_50s = 0\n\n qtt_20s = counter.how_many_20s(amount)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n if not(options[1] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n return options\n\n return None # if it wasn't allowed to withdraw", "def test_bet(self):\n hand = self._hand\n self.assertEqual(hand.bet.amount, 150)", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def bet(self, amount):\n if amount >self.budget:\n print 'you cannot bet because of little money'\n else:\n self.bet_amount = amount\n print 'you bet %s' % (amount)", "def balance_money_check():\r\n print(balance_money)", "def check_funds(self, amount):\n if amount > self.get_balance():\n return False\n else:\n return True", "def test_buyTicket_insufficientFunds():\n old_venue_balance = testVenue.wallet\n assert not testUser4.buyTicket(testTicket3)\n assert testTicket3 not in testUser4.inventory\n assert testTicket3.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def validate_bet(buy_type, cash_in):\n while cash_in < 0:\n print(\"Invalid\", buy_type)\n cash_in = round(float(input(\"Enter \" + buy_type + \": $\")), 2)\n\n return cash_in", "def test_amount_in_tons(self):", "def test_open_ru_ballance(self, ):\n if self.report_type == 'open.ru':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals()\n repo_deals = self.get_repo_deals()\n \n if self.open_ru_report_type == 'stock':\n comm = self.open_ru_get_micex_commission(deals, repo_deals)\n elif self.open_ru_report_type == 'future':\n 
atl = self.get_account_totally_line()\n comm = self.open_ru_get_forts_comm(atl)\n ballance = sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('price')) *\n float(d.getAttribute('quantity'))\n for d in deals])\n ballance += sum([float(d.getAttribute('deal_sign')) *\n float(d.getAttribute('deal_price')) *\n float(d.getAttribute('quantity'))\n for d in repo_deals])\n ballance += 10000 - comm # 10000 is the initial account amount\n accs = self.model.list_view_accounts().fetchall()\n self.assertEqual(1, len(accs))\n self.assertAlmostEqual(ballance, accs[0]['current_money'])", "def test_return_goal_weight_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n goal_weight = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertFalse(goal_weight)", "def test_return_goal_under_cruising_weight(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"51\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n self.assertEqual(return_goal, 9)", "def test_preliminary(self, ):\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n if self.deals_count >= 0:\n self.assertEqual(self.deals_count, self.model._sqlite_connection.execute('select count(*) from deals').fetchone()[0])\n print('deals count passed')\n if self.report_type == 'open.ru':\n if self.open_ru_report_type == 'stock':\n pt = self.model.get_paper_type('stock')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')\n elif self.open_ru_report_type == 'future':\n pt = self.model.get_paper_type('future')\n self.assertEqual(set([pt['id']]), set(map(lambda a: a[0], self.model._sqlite_connection.execute('select distinct type from papers').fetchall())))\n print('stock or fut passed')", "def test_account_net_worth_4(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000, parse_datetime(\"2018-08-30 23:00:00\"))\n\n net_worth = account_checking.net_worth(\n base_asset=asset_usd, evaluated_at=parse_date(\"2018-08-30\")\n )\n assert net_worth == 1000", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def bet_check(m):\n try:\n value = float(m.content)\n if 0 <= value <= player.coins:\n return True\n else:\n return False\n except:\n return False", "def test_discard_buy(self):\n self.plr.test_input = [\"finish selecting\", \"discard gold\"]\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.piles[Piles.HAND].size(), 2)\n self.assertEqual(self.plr.actions.get(), 1)\n self.assertEqual(self.plr.buys.get(), 2)\n 
self.assertNotIn(\"Gold\", self.plr.piles[Piles.HAND])", "def clean_amount(self):\n if self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "def test_bidding_round_handle_transactions(self):\n self.order_1.save()\n self.order_2.save()\n self.order_3.save()\n self.order_4.save()\n self.order_5.save()\n self.order_6.save()\n self.order_7.save()\n self.order_8.save()\n self.order_9.save()\n self.order_10.save()\n self.order_11.save()\n self.order_12.save()\n self.order_13.save()\n\n # =================================================================\n # test: sell order has more stocks then sell-person\n # =================================================================\n\n self.person_2.number_of_stocks = 0\n self.person_2.save()\n\n try:\n self.bidding_round_manager.handle_transactions(bidding_rounds=[self.bidding_round])\n raise AssertionError('ExceedMaxSellSharesException expected')\n except ExceedMaxSellSharesException:\n pass", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def check_user_has_enough_money(session, user_id, amount):\n user_funds = get_user_balance(session, user_id)\n if user_funds + amount < 0:\n raise NotEnoughMoneyException(\"Not enough money in your wallet!\")", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! 
Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def test_return_goal_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n self.assertEqual(return_goal, 12.6)", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def decide_winner(user, dealer):\n print(f\"\\n=============== END OF ROUND ===============\\n\\n\"\n f\"This round, your total hand is {user.total} and the dealer's total hand is {dealer.total}\")\n if bust(dealer):\n return \"user\"\n if bust(user):\n return \"dealer\"\n if user.total > dealer.total:\n return \"user\"\n if user.total < dealer.total:\n return \"dealer\"\n else: # tie\n return \"draw\"", "def test_account_net_worth_3(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000)\n\n net_worth = account_checking.net_worth(base_asset=asset_usd)\n assert net_worth == 1000", "def test_return_advice_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n advice = \"Ton objectif semble trop bas, je te conseille de ne pas \" \\\n \"aller en dessous de 47.4 kg. \" \\\n \"C'est donc l'objectif que nous allons fixer ! \"\n self.assertEqual(return_advice, advice)", "def test_return_goal_weight_under_cruising_weight(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"51\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertEqual(return_goal, 51)", "def test_convert_amounts(self):\n pass", "def withdraws(account):\r\n limit = 500\r\n print(\"Your account balance is $\", format(account, \"0.2f\"), sep='')\r\n print(\"Your withdraw limit is $\", format(limit, \"0.2f\"), sep='')\r\n while True:\r\n try:\r\n withdraw_amount = int(input(\"Enter withdraw amount. $\"))\r\n break\r\n except ValueError:\r\n print(\"Error. Must be a whole number.\")\r\n # Checking if the customer has sufficient funds/over daily limit\r\n while withdraw_amount > account or withdraw_amount > limit:\r\n print(\"Insufficient funds or daily limit exceeded.\")\r\n while True:\r\n try:\r\n withdraw_amount = int(\r\n input(\"Enter withdraw amount. $\"))\r\n break\r\n except ValueError:\r\n print(\"Error. 
Must be a whole number.\")\r\n account -= withdraw_amount\r\n limit -= withdraw_amount\r\n print(\"Your new balance is $\", format(account, \"0.2f\"), sep='')\r\n print(\"Your new limit is $\", format(limit, \"0.2f\"), sep='')", "async def _bailout_heist(self, ctx, user: discord.Member=None):\r\n author = ctx.message.author\r\n theme = await self.thief.get_guild_theme(ctx.guild)\r\n\r\n t_bail = theme[\"Bail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if user is None:\r\n player = author\r\n else:\r\n player = user\r\n\r\n if await self.thief.get_member_status(player) != \"Apprehended\":\r\n return await ctx.send(\"{} is not in jail.\".format(player.display_name))\r\n\r\n cost = await self.thief.get_member_bailcost(player)\r\n if not await bank.get_balance(player) >= cost:\r\n await ctx.send(\"You do not have enough to afford the {} amount.\".format(t_bail))\r\n return\r\n\r\n if player.id == author.id:\r\n msg = (\"Do you want to make a {0} amount? It will cost {1} credits. If you are \"\r\n \"caught again, your next {2} and {0} amount will triple. \"\r\n \"Do you still wish to pay the {0} amount?\".format(t_bail, cost, t_sentence))\r\n else:\r\n msg = (\"You are about pay a {2} amount for {0} and it will cost you {1} credits. \"\r\n \"Are you sure you wish to pay {1} for {0}?\".format(player.name, cost, t_bail))\r\n\r\n await ctx.send(msg)\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n\r\n if response is None:\r\n await ctx.send(\"You took too long. canceling transaction.\")\r\n return\r\n\r\n if \"yes\" in response.content.lower():\r\n msg = (\"Congratulations {}, you are free! Enjoy your freedom while it \"\r\n \"lasts...\".format(player.display_name))\r\n await bank.withdraw_credits(author, cost)\r\n await self.thief.set_member_free(author)\r\n await self.thief.set_member_oob(author, False)\r\n elif \"no\" in response.content.lower():\r\n msg = \"Canceling transaction.\"\r\n else:\r\n msg = \"Incorrect response, canceling transaction.\"\r\n\r\n await ctx.send(msg)", "def test_return_advice_actual_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"45\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n advice = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[1]\n\n text = \"Ton poids actuel est déjà bien bas... je te déconseille \" \\\n \"de perdre plus de poids. 
\"\n self.assertEqual(advice, text)", "def test_isolate_amount(self):\n self.assertIsNotNone(isolate_amount)", "def test_return_goal_weight_goal_weight_is_too_low(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"45\", \"weight_goal\": \"40\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[2]\n\n self.assertEqual(return_goal, 47.4)", "def test_case_1(\n steth, asteth, debtsteth,\n fixed_stake_eth, fixed_deposit_steth,\n fixed_borrow_steth, fixed_repay_steth,\n accounts\n):\n a = accounts[0]\n b = accounts[1]\n c = accounts[2]\n\n assert fixed_stake_eth(a, 1000) == 1000\n assert fixed_stake_eth(b, 1000) == 1000\n\n assert fixed_deposit_steth(a, 500) == 500\n assert fixed_deposit_steth(b, 500) == 500\n\n assert fixed_borrow_steth(c, 500) == 500\n\n assert steth.balance_of(a) == 500\n assert steth.balance_of(b) == 500\n assert steth.balance_of(c) == 500\n assert steth.balance_of(asteth.address) == 500\n\n assert asteth.balance_of(a) == 500\n assert asteth.balance_of(b) == 500\n\n assert debtsteth.balance_of(c) == 500\n\n # Rebase x2\n assert steth.total_supply() * 2 == steth.rebase_mul(2.0)\n\n assert steth.balance_of(a) == 1000\n assert steth.balance_of(b) == 1000\n assert steth.balance_of(c) == 1000\n assert steth.balance_of(asteth.address) == 1000\n\n fair_sharing = 500 / 2\n expected_asteth_balance = 1000 - fair_sharing\n assert asteth.balance_of(a) == expected_asteth_balance\n assert asteth.balance_of(b) == expected_asteth_balance\n\n assert debtsteth.balance_of(c) == 500\n\n # Repay\n assert fixed_repay_steth(c, 500) == 0\n\n assert steth.balance_of(a) == 1000\n assert steth.balance_of(b) == 1000\n assert steth.balance_of(c) == 500\n assert steth.balance_of(asteth.address) == 1500\n\n expected_asteth_balance = asteth.total_supply() / 2\n assert asteth.balance_of(a) == expected_asteth_balance\n assert asteth.balance_of(b) == expected_asteth_balance\n\n assert debtsteth.balance_of(c) == 0\n\n d = accounts[3]\n\n fixed_stake_eth(d, 100)\n fixed_deposit_steth(d, 50)\n\n assert steth.balance_of(a) == 1000\n assert steth.balance_of(b) == 1000\n assert steth.balance_of(c) == 500\n assert steth.balance_of(asteth.address) == 1550\n\n assert asteth.balance_of(a) == expected_asteth_balance\n assert asteth.balance_of(b) == expected_asteth_balance\n assert steth.balance_of(d) == asteth.balance_of(d)\n\n # Rebase x2\n steth.rebase_mul(2.0)\n\n assert steth.balance_of(a) == 2000\n assert steth.balance_of(b) == 2000\n assert steth.balance_of(c) == 1000\n assert steth.balance_of(d) == 100\n assert steth.balance_of(asteth.address) == 3100\n\n assert asteth.balance_of(a) == expected_asteth_balance * 2 < 2000\n assert asteth.balance_of(b) == expected_asteth_balance * 2 < 2000\n assert steth.balance_of(d) == asteth.balance_of(d)", "def raise_bet(value):\r\n\r\n global total_bet, dealer_bet, in_play, bottom_alert\r\n if value > player.get_cash() or not in_play:\r\n bottom_alert = \"You cannot bet $%i right now.\" % (value)\r\n elif in_play:\r\n player.spend_cash(value)\r\n dealer_bet += value\r\n total_bet += value * 2\r\n bottom_alert = \"\"", "def test_market_1_2(self):\n\n def check_1_2(buyers: List[float], sellers: List[float], expected_num_of_deals: int,\n expected_prices: List[float]):\n market = Market([\n AgentCategory(\"buyer\", buyers),\n AgentCategory(\"seller\", sellers),\n ])\n ps_recipe = [1, 2]\n self._check_market(market, ps_recipe, expected_num_of_deals, expected_prices)\n\n check_1_2(buyers=[9], 
sellers=[-4, -3],\n expected_num_of_deals=0, expected_prices=[9, -4.5])\n check_1_2(buyers=[9, 8, 7, 6], sellers=[-6, -5, -4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-6, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n check_1_2(buyers=[9, 8], sellers=[-4, -3, -2, -1],\n expected_num_of_deals=1, expected_prices=[8, -4])\n\n # PRICE CROSSES ZERO AT FIRST PHASE\n check_1_2(buyers=list(range(20)), sellers=[-3, -2, -1],\n expected_num_of_deals=1, expected_prices=[18, -9])", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def valid_bet(self, amount: int) -> bool:\n return MINIMUM_BET() <= amount <= self.balance", "def bust(person):\n if person.total > GOAL_TOTAL() and person.aceCount == 0:\n return True\n elif person.total > GOAL_TOTAL() and person.aceCount > 0:\n adjust_ace(person)\n return person.total > GOAL_TOTAL()\n else: # person.total <= GOAL_TOTAL()\n return False", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def user_balance_lost(user_pokemon: str, computer_pokemon: str, bet_amount) -> int:\n user_pokemon = get_pokemon(user_pokemon)\n computer_pokemon = get_pokemon(computer_pokemon)\n user_attack = type_logic.damage_to(user_pokemon.first_type, computer_pokemon.first_type) + type_logic.damage_to(user_pokemon.first_type, computer_pokemon.second_type)\n computer_attack = type_logic.damage_to(computer_pokemon.first_type, user_pokemon.first_type) + type_logic.damage_to(computer_pokemon.first_type, user_pokemon.second_type)\n difference = abs(user_attack - computer_attack)\n print(\"{} attacks {} for {}\".format(user_pokemon.name, computer_pokemon.name, difference))\n\n money_exchange = 0\n if difference == 0:\n # 45% of this happening\n money_exchange = 
0\n elif .5 <= difference <= 1.5:\n # 33% of this happening\n money_exchange = .5 * bet_amount\n else:\n # 22% of this happening\n money_exchange = bet_amount\n\n return -money_exchange if user_attack < computer_attack else money_exchange", "def clean_bid(self):\n new_bid = int(self.cleaned_data['bid'])\n top_bid = int(self.top_bid)\n min_bid = int(self.min_bid)\n if new_bid < 0:\n raise forms.ValidationError(\n f\"Days cannot be less 0.\"\n )\n if new_bid < top_bid or new_bid == top_bid:\n raise forms.ValidationError(\n \"Number of days have to be greater than the Option available.\")\n\n if new_bid < min_bid:\n raise forms.ValidationError(\n \"Number of days have to be greater than or equal to the Option available.\"\n )\n return new_bid", "def can_bet(self, amount: int) -> bool:\n if isinstance(amount, int):\n if self.chips_amount - amount >= 0:\n return True\n else:\n raise ValueError(\"The 'amount' param must be an instance of int, got\"+\n str(type(amount)))\n\n return False", "def Trading(Seller,Buyer):\n if Seller.has_sold == False:\n if Buyer.like_buy >= Seller.like_sell:\n Seller.has_sold = True\n Buyer.has_bought = True\n Seller.sold_objects += 1\n Buyer.bought_objects += 1\n print('A trade has been made')\n else:\n Buyer.has_bought = False\n Seller.has_sold = False\n print('There was no deal')\n else:\n Buyer.has_bought = False", "def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()", "def balance(self, player):\n print 'hand of %s: %s'%(player.name,player.cards.hand)\n print 'hand of %s: %s'%(self.name,self.cards.hand)\n if player.cards.hand == self.cards.hand:\n return 0\n elif player.cards.hand > self.cards.hand:\n return player.bet_amount*2\n else:\n return -player.bet_amount", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def test_gain(self):\n self.plr.piles[Piles.DECK].set(\"Duchy\")\n self.plr.test_input = [\"Get 
Estate\"]\n self.plr.gain_card(\"Cursed Village\")\n self.assertNotIn(\"Curse\", self.plr.piles[Piles.DISCARD])\n self.assertIsNotNone(self.plr.piles[Piles.DISCARD][\"Estate\"])\n self.assertIn(\"Duchy\", self.g.trashpile)", "def main():\n\n # values to receive from a user\n bidders = []\n item_name = \"\"\n num_of_bidders = 0\n starting_price = 0\n\n errors = []\n is_valid = False\n while not is_valid:\n is_valid = True\n errors.clear()\n item_name = input(\"Please type the name of the item: \")\n\n # Starting price\n try:\n starting_price = float(input(\"Please type the starting price: \"))\n except ValueError:\n errors.append(\"[Error] Starting price should be a decimal number.\")\n is_valid = False\n\n # Number of bidders\n try:\n num_of_bidders = int(input(\"Please type the number of bidders: \"))\n except ValueError:\n errors.append(\"[Error] Number of bidders should be an integer.\")\n is_valid = False\n\n # print input errors\n for error in errors:\n print(error)\n\n # Creating bidders\n num = 1\n bidder_name = \"\"\n is_valid = False\n while num <= int(num_of_bidders) or is_valid is False:\n print(f\"Please provide the details of the bidder {num}\")\n name = input(\"name: \")\n try:\n budget = float(input(\"budget: \"))\n except ValueError as e:\n print(\"[Error] Budget should be a decimal number\")\n else:\n is_valid = True\n inc_rate = random.random()\n bidders.append(Bidder(name, float(budget), (1 + inc_rate)))\n num += 1\n\n # Create Auction with the input values and Start the auction\n my_auction = Auction(bidders, item_name, float(starting_price))\n print(f\"\\nStarting Auction!!\\n----------------------\\n\"\n f\"Auctioning {bidder_name} starting at {starting_price}.\")\n my_auction.start_auction()\n\n # Print out the auction results\n my_auction.print_auction_result()", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n # TODO distinct up/down rates\n # check limiting rate for resource flow in/out, if any\n if self._rate:\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]\n delta = np.sign(amt) * min(max_rate, abs(amt))\n print('max_rate in _check_rate_limit',max_rate, 'delta (min of maxrate and abs(amt)',delta)\n return {res: delta}, meta\n return {res: amt}, meta", "def __call__(self, auctioneer):\n possible_bid = self.bid_increase_perc * auctioneer.get_highest_bid()\n if possible_bid < self.budget and random.random() <= self.bid_probability:\n self.highest_bid = possible_bid\n auctioneer.accept_bid(possible_bid, self)", "def bet(self):\n while True:\n try:\n self.round_bet = float(\n input(f'{self.name}, please enter an amount to bet for this round: '))\n if self.round_bet > self.bankroll:\n print('You have bet more than you have!')\n continue\n if self.round_bet <= 0:\n self.out_of_round = True\n else:\n self.bankroll -= self.round_bet\n break\n except TypeError:\n print('Please enter in a valid bet!')\n continue\n except ValueError:\n print('Please enter in a valid bet!')\n return self.name, self.round_bet", "def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)", "def 
test_balance(db_mock):\n assert db_mock.balance(\"ACCT100\") == \"40.00 USD\"\n assert db_mock.balance(\"ACCT200\") == \"-10.00 USD\"\n assert db_mock.balance(\"ACCT300\") == \"0.00 USD\"\n assert db_mock.balance(\"7ammo\") is None", "def test_credit(self):\n new_wallet = Wallet.objects.get(name=\"new_wallet\")\n new_wallet.credit(\"100.99\")\n new_wallet_money = Wallet.objects.filter(name=\"new_wallet\")[0].money\n decimal_money_raised = decimal.Decimal(\"100.99\")\n self.assertEqual(new_wallet_money, decimal_money_raised)", "async def bet(message, user: ParamType.MIXER_USER, amount):\n\n username = user.username.lower()\n username_sender = message.username.lower()\n\n mixcord_user = await database.get_user(message.user_id)\n\n # handle if somebody is trying to accept or deny\n if amount == \"accept\" or amount == \"deny\":\n\n # get the pending bet\n bet = pending_bets.get(username)\n if bet is None or bet[\"username\"] != username_sender:\n return \"failed to find the bet you're responding to.\"\n\n # delete the pending bet, because we're handling it\n del pending_bets[username]\n\n # if the user wants to deny the bet, don't do anything\n if amount == \"deny\":\n return \"you have denied the pending bet from @{}.\".format(username)\n\n # if the user wants to accept the bet, continue\n if amount == \"accept\":\n\n # make sure they have enough money to accept\n if bet[\"amount\"] > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to accept this bet.\"\n\n # make sure the issuer of the challenge still has enough money\n competitor_mixcord_user = await database.get_user(user.id)\n if bet[\"amount\"] > competitor_mixcord_user[\"balance\"]:\n return \"@{} no longer has sufficient funding to run this bet.\".format(username)\n\n # determine winner/loser\n pick = random.randint(0, 1) == 1\n winner_id = user.id if pick else message.user_id\n loser_id = message.user_id if pick else user.id\n winner_username = username if pick else username_sender\n loser_username = message.username if pick else username\n\n # affect balances accordingly\n await database.add_balance(winner_id, bet[\"amount\"])\n await database.add_balance(loser_id, -bet[\"amount\"])\n\n # end the bet!\n await chat.send_message(\"@{} has won {} {}! 
better luck next time, @{}.\".format(winner_username, bet[\"amount\"], currency_name, loser_username))\n return None\n\n # make sure the amount is numeric by converting it to an int\n amount = utils.get_positive_int(amount)\n if amount is None: return \"amount must be a positive integer.\"\n\n # make sure they're not trying to start a bet against themself :/\n if message.username == username:\n return \"you're not able to start a bet against yourself.\"\n\n # make sure we don't already have a pending bet\n if pending_bets.get(message.username) is not None:\n return \"you already have a pending bet.\"\n\n # make sure the challenger has enough money to start the bet\n if amount > mixcord_user[\"balance\"]:\n return \"you have insufficient funds to request this bet.\"\n\n # store challenge information\n pending_bets[message.username] = {\n \"username\": username,\n \"amount\": amount\n }\n\n # send messages indicating the challenge has been issued\n await chat.send_message(\"@{} has challenged @{} to a bet of {} {}!\".format(message.username, username, amount, currency_name))\n await asyncio.sleep(0.5)\n await chat.send_message(\"use {}bet @{} [accept/deny] to respond to your pending bet!\".format(chat.commands.prefix, message.username), username)\n\n # automatically timeout the bet in 30 seconds\n await asyncio.sleep(30)\n bet = pending_bets.get(message.username)\n if bet is not None:\n del pending_bets[message.username]\n await chat.send_message(\"@{} your pending bet has timed out.\".format(message.username))", "def test_open_ru_stock_commission(self, ):\n if self.report_type == 'open.ru' and self.open_ru_report_type == 'stock':\n (mid, aid) = self.make_money_and_account() #@UnusedVariable\n self.load_data_into_account(aid)\n deals = self.get_deals() \n repo_deals = self.get_repo_deals()\n summcomm = self.open_ru_get_micex_commission(deals, repo_deals)\n self.assertAlmostEqual(summcomm, \n self.model._sqlite_connection.execute('select sum(commission) from deals').fetchone()[0])\n print('test stock commission passed')", "def determine_winner(self):\n if self.player.sum_cards() > 21:\n print(\"BUST! Dealer wins.\")\n\n elif self.dealer.sum_cards() > 21:\n print(\"DEALER BUSTS! 
You win\")\n\n elif self.player.sum_cards() > self.dealer.sum_cards():\n print(\"You win!\")\n\n elif self.dealer.sum_cards() > self.player.sum_cards():\n print(\"Dealer wins!\")\n\n else:\n print(\"It's a tie!\")", "def test_adjusted_payment_below_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # <- job still has some balance\n invoiced=A(580),\n paid=A(-480), # <- 20.00 adjusted\n debited=A(600),\n credited=A(-500),\n income=A(580).net_amount,\n tax=A(580).tax_amount,\n ) # <- income is higher than bank balance", "def prepare_trade(self, input_, prediction):\n if prediction > input_:\n # Price will go up, so we should buy\n # amount = self.amount\n amount = self.amount\n allowed, amount_ret = self.check_risk('buy', amount)\n assert amount == amount_ret or amount == 'max', \"Mistake in check_risk function\"\n if allowed:\n return 'buy', amount_ret\n else:\n return False, amount_ret\n elif prediction < input_:\n # Sell, short or hold?\n amount = -1 * self.amount\n allowed, amount_ret = self.check_risk('buy', amount)\n assert amount == amount_ret, \"Mistake in check_risk function\"\n if allowed:\n return 'sell', amount_ret\n else:\n return False, amount_ret", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def evaluate_winners_and_losers(future_price):\n\n winners = []\n losers = []\n\n target_price = future_price.target_price\n try:\n actual_price_obj = Bitcoin_Price.objects.get(time=future_price.time_to_match_price)\n except:\n return # there is no bitcoin price for this time so this future_price cannot be evaluated\n actual_price = actual_price_obj.price\n price_is_less_than_target = actual_price < target_price\n price_is_equal_to_target = target_price == actual_price\n\n amounts = Received_Amount.objects.filter(\n amount__gt=0,\n prediction__future_price=future_price,\n time__lt=future_price.time_window_closes\n ).order_by('time', 'id')\n\n # Split into winners and losers\n for received_amount in amounts:\n guessed_correctly = (received_amount.prediction.price_will_be_less_than_target and price_is_less_than_target) or \\\n (not received_amount.prediction.price_will_be_less_than_target and not price_is_less_than_target)\n if guessed_correctly:\n # This is a winner\n returned_amount = {\n \"amount\": received_amount.amount,\n \"from_received_amount\": received_amount,\n \"to_prediction\": received_amount.prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n winners.append({\n \"received_amount\": received_amount,\n \"from_losers\": 0\n })\n elif price_is_equal_to_target:\n # Eligible for refund but not for winnings\n # TODO: If the received amount is not confirmed, it will still be\n # returned\n returned_amount = {\n \"amount\": received_amount.amount,\n \"from_received_amount\": received_amount,\n \"to_prediction\": received_amount.prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n else:\n # Record this so in the next step this can be allocated to winners\n losers.append({\n \"received_amount\": received_amount,\n \"to_winners\": 0,\n 
\"commission\": 0\n })\n\n for loser in losers:\n # Pay the winners\n for winner in winners:\n loser_funds_remaining = loser[\"received_amount\"].amount - loser[\"to_winners\"] - loser[\"commission\"]\n loser_is_broke = loser_funds_remaining == 0\n if loser_is_broke:\n break\n winner_received_from_losers = winner[\"from_losers\"]\n winner_total_owed_from_losers = winner[\"received_amount\"].amount * (1-COMMISSION)\n amount_remaining_to_pay_winner = winner_total_owed_from_losers - winner_received_from_losers\n if amount_remaining_to_pay_winner > 0:\n amount_to_pay_winner = min(amount_remaining_to_pay_winner, loser_funds_remaining * (1-COMMISSION))\n commission = amount_to_pay_winner / (1-COMMISSION) * COMMISSION\n loser[\"to_winners\"] = loser[\"to_winners\"] + amount_to_pay_winner\n loser[\"commission\"] = loser[\"commission\"] + commission\n winner[\"from_losers\"] = winner[\"from_losers\"] + amount_to_pay_winner\n returned_amount = {\n \"amount\": amount_to_pay_winner,\n \"from_received_amount\": loser[\"received_amount\"],\n \"to_prediction\": winner[\"received_amount\"].prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()\n\n commission_amount = {\n \"returned_amount\": returned_amount_obj,\n \"amount\": commission\n }\n commission_amount_obj = Commission_Amount(**commission_amount)\n commission_amount_obj.save()\n # Return any amount remaining after all the winners are paid\n loser_funds_remaining = loser[\"received_amount\"].amount - loser[\"to_winners\"] - loser[\"commission\"]\n if loser_funds_remaining > 0:\n returned_amount = {\n \"amount\": loser_funds_remaining,\n \"from_received_amount\": loser[\"received_amount\"],\n \"to_prediction\": loser[\"received_amount\"].prediction,\n }\n returned_amount_obj = Returned_Amount(**returned_amount)\n returned_amount_obj.save()", "def test_order_cost_money(self):\n\t\tself.g.resolve_current_turn()\n\t\tself.assertEqual(self.reload(self.p).money, self.initial_money - BuyInfluenceOrder.BASE_COST)", "def ReflectingBuyer(Buyer):\n increase_step = 0.01\n\n if Buyer.has_bought == True:\n Buyer.like_buy *= (1-increase_step)\n elif Buyer.like_buy * (1+increase_step) >= Buyer.max_value and Buyer.has_bought == False:\n Buyer.like_buy = Buyer.max_value\n else:\n Buyer.like_buy *= (1+increase_step)\n Buyer.has_bought = False #return to normal state", "def test_balance_tracking(self):\n # TODO\n pass", "def process_payment(money_received, drink_cost):\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. 
Money refunded.\")\n return False", "def check_sufficient_funds(self, amount):\n balance = self.get_balance_amount()\n if(balance < amount):\n return False\n return True", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def test_return_goal_goal_weight_ok(self):\n data_weight_user = {\"height\": \"1,60\", \"actual_weight\": \"60\",\n \"cruising_weight\": \"55\", \"weight_goal\": \"55\"}\n return_goal = self.new_weight_advice_goal.return_weight_advices_goal(data_weight_user)[0]\n\n self.assertEqual(return_goal, 5)", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def price_check(cash, price, shares):\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False" ]
[ "0.67128986", "0.67048556", "0.66379863", "0.6527508", "0.6469274", "0.64126986", "0.63823014", "0.63767564", "0.63319266", "0.6226689", "0.62261546", "0.61649483", "0.6154119", "0.61476374", "0.6144717", "0.613952", "0.6133715", "0.61022395", "0.6094778", "0.60594314", "0.6058217", "0.6053892", "0.60538596", "0.6017126", "0.60085297", "0.6000387", "0.59932363", "0.59712166", "0.59692323", "0.59559584", "0.59559536", "0.5949435", "0.59444225", "0.5907703", "0.5902846", "0.5900164", "0.5894781", "0.587949", "0.58769363", "0.5872175", "0.5871329", "0.5869223", "0.58630854", "0.5856936", "0.5854474", "0.5843632", "0.584345", "0.5843033", "0.58397126", "0.5839424", "0.58243763", "0.58172125", "0.58153296", "0.5809903", "0.5804762", "0.5802411", "0.5801514", "0.5800852", "0.5791884", "0.5787075", "0.57826924", "0.57792854", "0.57768625", "0.5776615", "0.5772811", "0.5766603", "0.57531244", "0.57495147", "0.5748075", "0.57441217", "0.57348835", "0.5734089", "0.5729498", "0.5720191", "0.5709564", "0.5708278", "0.57077664", "0.57035506", "0.5697208", "0.5694472", "0.569208", "0.5688284", "0.56864333", "0.56777143", "0.5673564", "0.5669647", "0.56677127", "0.56656975", "0.56643826", "0.5657517", "0.5649218", "0.56443924", "0.564036", "0.56283003", "0.5621626", "0.5618502", "0.56146693", "0.5607604", "0.56052935", "0.5604294", "0.55990744" ]
0.0
-1
Make sure they have enough money.
def testInsufficientCash(self):
    bid_move = self._move()
    context = self._context()
    context.players[0].cash = 200
    bfpc = BiddingForPrivateCompany()
    self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def test_has_enough_money_handles_insufficient_funds(self):\n # Params\n f_money_collected = 2.00\n f_chocolate_price = 2.25\n\n # Returns\n return_1 = 'Insufficient funds... Dispensing coins inserted.\\n'\n\n # Calls\n string_1 = has_enough_money(f_money_collected, f_chocolate_price)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def check_sufficient_funds(self, amount):\n balance = self.get_balance_amount()\n if(balance < amount):\n return False\n return True", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def check_user_has_enough_money(session, user_id, amount):\n user_funds = get_user_balance(session, user_id)\n if user_funds + amount < 0:\n raise NotEnoughMoneyException(\"Not enough money in your wallet!\")", "def test_buyTicket_insufficientFunds():\n old_venue_balance = testVenue.wallet\n assert not testUser4.buyTicket(testTicket3)\n assert testTicket3 not in testUser4.inventory\n assert testTicket3.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. 
Money refunded.\")\n return False\n else:\n return True", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def test_buyTicket_EmptiesWallet():\n old_venue_balance = testVenue.wallet\n assert testUser1.buyTicket(testTicket1)\n assert testUser1.inventory[-1] == testTicket1\n assert not testTicket1.for_sale\n assert testUser1.wallet == 0\n assert testVenue.wallet == old_venue_balance + testTicket1.list_price", "def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def has_money(self) -> bool: \n \n return self.money > 0.0", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def get_money(self) -> float: \n money = get_owned()\n try:\n assert type(self.owned_init) == float\n except AssertionError: #The first time one tries to make a bet this is evoked\n self.owned_init = money\n finally:\n return money", "def test_order_cost_money(self):\n\t\tself.g.resolve_current_turn()\n\t\tself.assertEqual(self.reload(self.p).money, self.initial_money - BuyInfluenceOrder.BASE_COST)", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n 
debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' % (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def check_cap(org, amount):\n from django.db.models import Sum, Q\n\n if amount < 0:\n query = Q(favor__lt=0)\n else:\n query = Q(favor__gt=0)\n total = abs(\n org.reputations.filter(query).aggregate(sum=Sum(\"favor\"))[\"sum\"] or 0\n ) + abs(amount)\n mod = org.social_modifier * 5\n if total > mod:\n noun = \"favor\" if amount > 0 else \"disfavor\"\n raise CommandError(\n \"That would bring your total %s to %s, and you can only spend %s.\"\n % (noun, total, mod)\n )", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def test_buyTicket_FreeTicket():\n old_venue_balance = testVenue.wallet\n assert testUser4.buyTicket(testTicket4)\n assert testUser4.inventory[-1] == testTicket4\n assert not testTicket4.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def restock(self):\n self.money = 9999", "def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True", "def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)", "def get_money(self, fromobj):\n val, currency = money_from_args(self.args, fromobj)\n if val > currency:\n raise CommandError(\n \"Not enough money. 
You tried to {verb} {val}, but can only {verb} {currency}.\".format(\n verb=self.cmdstring, val=val, currency=currency\n )\n )\n fromobj.pay_money(val, self.caller)\n return val", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def test_balance_too_low(\n self, wait_tx_settled_mock, confirm_mock, do_transfer_mock\n ):\n password_option = self.get_password_args(self.PASSWORD)\n with pytest.raises(\n ClickException,\n match=r\"Balance is not enough! Available=[0-9]+, required=[0-9]+!\",\n ):\n self.invoke(\n \"transfer\",\n self.LEDGER_ID,\n self.get_address(self.LEDGER_ID, self.PASSWORD),\n \"100000\",\n \"100\",\n *password_option,\n )", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def test_credit(self):\n new_wallet = Wallet.objects.get(name=\"new_wallet\")\n new_wallet.credit(\"100.99\")\n new_wallet_money = Wallet.objects.filter(name=\"new_wallet\")[0].money\n decimal_money_raised = decimal.Decimal(\"100.99\")\n self.assertEqual(new_wallet_money, decimal_money_raised)", "def test_isolate_amount(self):\n self.assertIsNotNone(isolate_amount)", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_create_warranty(self):\n pass", "def check_funds(self, amount):\n if amount > self.get_balance():\n return False\n else:\n return True", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def transaction_successful(drink_type):\r\n total = 0\r\n cost = MENU[drink_type][\"cost\"]\r\n print(f\" A {drink_type} costs ${MENU[drink_type]['cost']}\")\r\n total += float(input(\" How many quarters? \")) * 0.25\r\n total += float(input(\" How many dimes? \")) * 0.10\r\n total += float(input(\" How many nickels? \")) * 0.05\r\n total += float(input(\" How many pennies? \")) * 0.01\r\n\r\n if total >= cost:\r\n print(f\"Here is ${total - cost} in change.\")\r\n return True\r\n else:\r\n print(\"Sorry that's not enough money. 
Money refunded.\")\r\n return False", "def check_transaction(menu, drink, resources):\r\n customer_money = process_coins()\r\n drink_cost = menu[drink]['cost']\r\n if customer_money < drink_cost:\r\n print(\"Sorry that's not enough money.Money refunded\")\r\n return False\r\n else:\r\n if customer_money > drink_cost:\r\n change = round((customer_money - drink_cost), 2)\r\n print(f\"Here is your ${change} in change\")\r\n resources['Money'] += drink_cost\r\n return True", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "def free(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.total[name] - Resources.available[name]\n Resources.available[name] += amount", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def before_save(self):\n\t\t\n\t\tself.total_debit = 0\n\t\tself.total_credit = 0\n\t\t\n\t\tfor accounting_entry in self.get('accounting_entries'):\n\t\t\tself.total_credit += accounting_entry.credit\n\t\t\tself.total_debit += accounting_entry.debit\n\n\t\tif self.total_credit != self.total_debit:\n\t\t\tfrappe.throw(_('Total credit should be equal to total debit'))", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab", "def test_insufficient_funds(self):\n data = {\n 'from_account': self.from_account.id,\n 'to_account': self.to_account.id,\n 'amount': '100.01',\n }\n response = self.client.post(\n self.payments_list_url, data=data, format='json'\n )\n self.assertEqual(\n response.status_code, status.HTTP_400_BAD_REQUEST, response.data\n )\n self.assertIn(\n ERROR_INSUFFICIENT_FUNDS.format(self.from_account.owner),\n response.data['non_field_errors']\n )", "def transaction(money_in, drink_cost):\n if money_in >= drink_cost:\n change = round((money_in - drink_cost), 2) # 2 decimal places\n print(f'your change is £{change}')\n global profit #Global scope needed\n profit = profit + drink_cost\n return True\n else:\n print(\"That's not enough money!\")\n return False", "def test_check_cost():", "def clean_amount(self):\n if 
self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "async def _bailout_heist(self, ctx, user: discord.Member=None):\r\n author = ctx.message.author\r\n theme = await self.thief.get_guild_theme(ctx.guild)\r\n\r\n t_bail = theme[\"Bail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if user is None:\r\n player = author\r\n else:\r\n player = user\r\n\r\n if await self.thief.get_member_status(player) != \"Apprehended\":\r\n return await ctx.send(\"{} is not in jail.\".format(player.display_name))\r\n\r\n cost = await self.thief.get_member_bailcost(player)\r\n if not await bank.get_balance(player) >= cost:\r\n await ctx.send(\"You do not have enough to afford the {} amount.\".format(t_bail))\r\n return\r\n\r\n if player.id == author.id:\r\n msg = (\"Do you want to make a {0} amount? It will cost {1} credits. If you are \"\r\n \"caught again, your next {2} and {0} amount will triple. \"\r\n \"Do you still wish to pay the {0} amount?\".format(t_bail, cost, t_sentence))\r\n else:\r\n msg = (\"You are about pay a {2} amount for {0} and it will cost you {1} credits. \"\r\n \"Are you sure you wish to pay {1} for {0}?\".format(player.name, cost, t_bail))\r\n\r\n await ctx.send(msg)\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n\r\n if response is None:\r\n await ctx.send(\"You took too long. canceling transaction.\")\r\n return\r\n\r\n if \"yes\" in response.content.lower():\r\n msg = (\"Congratulations {}, you are free! 
Enjoy your freedom while it \"\r\n \"lasts...\".format(player.display_name))\r\n await bank.withdraw_credits(author, cost)\r\n await self.thief.set_member_free(author)\r\n await self.thief.set_member_oob(author, False)\r\n elif \"no\" in response.content.lower():\r\n msg = \"Canceling transaction.\"\r\n else:\r\n msg = \"Incorrect response, canceling transaction.\"\r\n\r\n await ctx.send(msg)", "def prepare_funding(self):\n entity_miner = self.entities[0]\n\n entity_miner.send_bitcoins(entity_miner.address)\n entity_miner.purchase_mastercoins(500.0)\n\n self.generate_block()\n self.check_balance(entity_miner.address, MSC, '50000.00000000', '0.00000000')\n self.check_balance(entity_miner.address, TMSC, '50000.00000000', '0.00000000')", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be a positive number above 0.')", "def get_money(self, money: float):\n\n assert isinstance(money, float), f\"{money} must be float.\"\n assert money > 0.0, f\"{money} must be a positive number.\"\n assert self.money >= money,(\n f\"There's no enough {money} in the account. \" \n f\"Current money: {self.money}\"\n )\n self.money -= money", "def __check_for_dividends(self) -> None:\n excess = self._excess_to_distribute.get()\n daofund = self._daofund_to_distirbute.get()\n\n Logger.debug(f'Found treasury excess of {excess}.', TAG)\n if excess > 0:\n try:\n Logger.debug(f'Trying to send to ({self._dividends_score.get()}): {excess}.', TAG)\n self.icx.transfer(self._dividends_score.get(), excess)\n self.FundTransfer(self._dividends_score.get(), excess, \"Excess made by games\")\n Logger.debug(f'Sent div score ({self._dividends_score.get()}) {excess}.', TAG)\n self._total_distributed.set(self._total_distributed.get() + excess)\n self._excess_to_distribute.set(0)\n except BaseException as e:\n Logger.debug(f'Send failed. Exception: {e}', TAG)\n revert('Network problem. Excess not sent. 
'\n f'Exception: {e}')\n\n if daofund > 0:\n try:\n self._daofund_to_distirbute.set(0)\n self.icx.transfer(self._daofund_score.get(), daofund)\n self.FundTransfer(self._daofund_score.get(), daofund, \"Excess transerred to daofund\")\n except BaseException as e:\n revert('Network problem. DAOfund not sent. '\n f'Exception: {e}')", "def test_collect_money_handles_excess_funds_over_max_value(self):\n # Params\n f_max_value = 100.00\n f_quarters = 2000\n f_dimes = 1\n f_nickels = 5\n\n # Returns\n return_1 = 'Machine can\\'t hold more than $100.00... Dispensing coins inserted.'\n\n # Calls\n string_1 = collect_money(f_max_value, f_quarters, f_dimes, f_nickels)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def add_to_excess(self) -> None:\n if self.msg.value <= 0:\n revert(\"No amount added to excess\")\n self._treasury_balance.set(self.icx.get_balance(self.address))\n self.FundReceived(self.msg.sender, self.msg.value, f\"{self.msg.value} added to excess\")", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "async def balance(self, ctx):\n try:\n cash = await ctx.bot.pool.fetchrow(f'select cash from wallet where id={ctx.author.id}')\n\n if cash is None:\n await ctx.bot.pool.execute(f'insert into wallet values ({ctx.author.id}, 0);')\n return await ctx.send('You do not have a wallet yet.')\n\n if cash[0] is None:\n return await ctx.send('You do not have a wallet yet.')\n\n await ctx.send(f'You have {cash[0]} robux.')\n except Exception as e:\n await ctx.send(e)", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)", "def test_account_net_worth_3(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000)\n\n net_worth = account_checking.net_worth(base_asset=asset_usd)\n assert net_worth == 1000", "def _validateSale(self, player: Player, company: PublicCompany, amount: int, kwargs: MutableGameState):\n my_purchases = kwargs.purchases[kwargs.stock_round_count].get(player, [])\n\n my_stock = player.hasStock(company)\n potential_owners = company.potentialPresidents()\n\n validations = [\n err(company not in my_purchases,\n \"You can't 
sell something you already bought: {} {}\",\n company.id, company.short_name),\n\n err(\n my_stock >= amount,\n \"You must have as much stock than you are trying to sell {}\",\n amount\n ),\n\n err(\n company.availableStock(StockPurchaseSource.BANK) + amount <= 60,\n \"You can't sell that much ({}); the bank can only have 50 shares max.\",\n amount\n ),\n\n err(\n len(company.potentialPresidents() - {player}) > 0 or my_stock - amount >= 20,\n \"There are no other potential presidents, so you can't sell your shares. {} / {} (original stock: {})\",\n \",\".join([p.id for p in company.potentialPresidents()]),\n company.name,\n str(company.owners.get(player))\n\n ),\n\n err(amount % STOCK_CERTIFICATE == 0,\n \"You can only sell in units of 10 stocks ({})\".format(amount),\n ),\n\n err(kwargs.stock_round_count > 1,\n \"You can only sell after the first stock round.\")\n ]\n\n return self.validate(validations)", "def balance_money_check():\r\n print(balance_money)", "def resolve_to_fail(self):\n\t\tself.player.money -= self.get_cost()\n\t\tself.player.save()\n\n\t\tself.resolve_failure()\n\t\treturn False", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def test_account_net_worth_4(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000, parse_datetime(\"2018-08-30 23:00:00\"))\n\n net_worth = account_checking.net_worth(\n base_asset=asset_usd, evaluated_at=parse_date(\"2018-08-30\")\n )\n assert net_worth == 1000", "def spend_cash(self, num):\r\n self.cash -= num\r\n return self.cash > num", "def deposit_money_check(amt):\r\n global balance_money\r\n print(\"Deposit money is : \", amt)\r\n balance_money = balance_money + amt", "def test_get_damage_out_of_limit(self):\n self.sold.health = 0.2\n self.sold.get_damage(0.32)\n self.assertEqual(self.sold.health, 0)", "def test_bidding_round_handle_transactions(self):\n self.order_1.save()\n self.order_2.save()\n self.order_3.save()\n self.order_4.save()\n self.order_5.save()\n self.order_6.save()\n self.order_7.save()\n self.order_8.save()\n self.order_9.save()\n self.order_10.save()\n self.order_11.save()\n self.order_12.save()\n self.order_13.save()\n\n # =================================================================\n # test: sell order has more stocks then sell-person\n # =================================================================\n\n self.person_2.number_of_stocks = 0\n self.person_2.save()\n\n try:\n self.bidding_round_manager.handle_transactions(bidding_rounds=[self.bidding_round])\n raise AssertionError('ExceedMaxSellSharesException expected')\n except ExceedMaxSellSharesException:\n pass", "def check_for_offer(self, bid, commodity, limit, actual, quantity, price):\n if bid:\n if 
len(self.trades[\"buys\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"buys\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"buys\"][commodity])\n\n # if total < limit:\n # #PLACE MORE BIDS.\n return total\n\n else:\n if len(self.trades[\"asks\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"asks\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"asks\"][commodity])\n #\n # if total < limit:\n # #PLACE MORE asks.\n # return total\n # if total < limit:\n # #PLACE MORE asks.\n return total # - limit", "def _award_accounts(self):\n\n prize_money = 0\n for i in xrange(len(self.accounts)):\n # Each savings account has a 1% chance of quadrupling their principal. The\n # chance is independent between accounts.\n if random.randint(1, 100) == 1:\n prize_money += 3 * self.accounts[i]\n self.accounts[i] *= 4\n return prize_money", "def bet(self, amount):\n if amount >self.budget:\n print 'you cannot bet because of little money'\n else:\n self.bet_amount = amount\n print 'you bet %s' % (amount)", "def give_raise(self, amount=5000):\n self.salary += amount", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def delete(self):\n existing_balance = self.account.calculated_balance\n\n if not self.active:\n pass\n elif (existing_balance - self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n super().delete()", "def is_resource_sufficient(self, drink):\n can_make = True\n for item in drink.ingredients:\n if drink.ingredients[item] > self.resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n can_make = False\n return can_make", "def test_count_current_money_quantity_in_offers_with_greater_quantity(offer_instances):\n\n offer_purchase_instance = offer_instances[0]\n\n result = check_user_balance(\n user_id=offer_purchase_instance.user.id,\n quantity=get_available_quantity_stocks(offer_id=offer_purchase_instance.id),\n price=100000,\n )\n\n assert result == False", "async def deposit(ctx, money:int):\n author = ctx.message.author\n if str(author) in settings.BOT_ADMIN:\n database.add_pokedollars(author, money)\n await ctx.send(\"funds deposited\")\n else:\n await ctx.send(\"You are not the bot admin. 
Go awai.\")", "def Buy(self, X, Y):\n if self.money - (int(Y) * self.price[X][0] * (1 + self.taxe)) < 0:\n raise TradeError(\"Not Enough Money\")\n self.share[X] += int(Y)\n self.money -= int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"BUY:{str(int(Y))}:{str(X)}\", flush = True)", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n if self.budget_manager.no_locked_budgets >= 2:\n self._locked = True\n print('YOUR BANK ACCOUNT HAS BEEN LOCKED!')\n elif exceeded_ratio > 0.5:\n self._warn_nearing_exceed_budget(budget, 50)\n self.print_transactions_for_review(budget)", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def test_e2e_order_book_amount_less_than_max_bal(self):\n\n cli = \"--balance 1 offline --test -ob test_data/order_books.csv\"\n deal = self._run_bot_offine(cli)\n\n self.assertAlmostEqual(0.06000734789047485, float(deal.data_row[\"start-qty\"]), 4)\n self.assertEqual(0.002407822109525136, float(deal.data_row[\"result-fact-diff\"]))\n\n # prices from order book\n self.assertNotEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))" ]
[ "0.73113734", "0.7142943", "0.6958009", "0.683706", "0.6807364", "0.6756622", "0.6750081", "0.6689734", "0.6550323", "0.65317523", "0.6529866", "0.65116155", "0.6492988", "0.6469569", "0.64487016", "0.64354503", "0.6415727", "0.6402978", "0.6386353", "0.637824", "0.6367256", "0.6363435", "0.63479817", "0.63307536", "0.63026", "0.6293664", "0.6272616", "0.6224096", "0.62065643", "0.62051725", "0.62043357", "0.6178486", "0.61619186", "0.6159341", "0.6153526", "0.61104566", "0.6078387", "0.6072608", "0.6059527", "0.60576415", "0.6054578", "0.6042796", "0.6031867", "0.6023573", "0.60214466", "0.6007401", "0.6002396", "0.60022503", "0.5992066", "0.59898704", "0.59893054", "0.5987873", "0.5987179", "0.59830695", "0.5981013", "0.5978558", "0.596876", "0.5967171", "0.5966388", "0.5960583", "0.59592116", "0.59580183", "0.5955789", "0.59523076", "0.59520817", "0.59381783", "0.59378207", "0.59357506", "0.5934093", "0.5932188", "0.59313834", "0.59194136", "0.58996546", "0.58872783", "0.5872625", "0.58649117", "0.5864021", "0.5851334", "0.5849586", "0.58450735", "0.5842428", "0.5833433", "0.5833191", "0.58240813", "0.581843", "0.58166796", "0.5816189", "0.5809523", "0.5809292", "0.580706", "0.5804107", "0.57974946", "0.57924986", "0.5789285", "0.5779947", "0.5776863", "0.5776521", "0.5775604", "0.57755613", "0.57750404" ]
0.6613819
8
Make sure they have enough money.
def testPassedAlready(self):
    _pass_move = self._pass_move()
    bid_move = self._move()
    context = self._context()
    bfpc = BiddingForPrivateCompany()
    self.assertTrue(bfpc.run(_pass_move, context), bfpc.errors())
    self.assertEqual(_pass_move.move_type, BidType.PASS)
    self.assertEqual(len(context.private_companies[1].passed_by), 1)
    self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())
    self.assertIn("You can only keep bidding until you've passed once.", bfpc.errors())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def test_has_enough_money_handles_insufficient_funds(self):\n # Params\n f_money_collected = 2.00\n f_chocolate_price = 2.25\n\n # Returns\n return_1 = 'Insufficient funds... Dispensing coins inserted.\\n'\n\n # Calls\n string_1 = has_enough_money(f_money_collected, f_chocolate_price)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def check_sufficient_funds(self, amount):\n balance = self.get_balance_amount()\n if(balance < amount):\n return False\n return True", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def check_user_has_enough_money(session, user_id, amount):\n user_funds = get_user_balance(session, user_id)\n if user_funds + amount < 0:\n raise NotEnoughMoneyException(\"Not enough money in your wallet!\")", "def test_buyTicket_insufficientFunds():\n old_venue_balance = testVenue.wallet\n assert not testUser4.buyTicket(testTicket3)\n assert testTicket3 not in testUser4.inventory\n assert testTicket3.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def _check(self):\n assert isinstance(self._price, int)\n assert self._price >= 0\n assert isinstance(self._units, int)\n assert self._units > 0\n assert self._side == OrderSide.BUY or self._side == OrderSide.SELL\n assert self._type == OrderType.LIMIT or self._type == OrderType.CANCEL\n assert isinstance(self._market, int)\n assert self._market > 0", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def check_money(drink, amount):\n if (drink == \"espresso\" and amount < MENU[drink][\"cost\"]) or (drink == \"latte\" and amount < MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount < MENU[drink][\"cost\"]):\n # if not enough money, start over\n print(f\"Sorry that's not enough money. Drink is ${MENU[drink]['cost']}. You gave ${amount}. 
Money refunded.\")\n return False\n else:\n return True", "def is_sufficient(money_received, price):\n if price <= money_received:\n change = round(money_received - price, 2)\n print(f\"Here is your {option}.Enjoy!\\nHere us £{change} in change\")\n global profit\n profit += price\n return True\n else:\n print(f\"Sorry not enough money\")\n return False", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def test_buyTicket_EmptiesWallet():\n old_venue_balance = testVenue.wallet\n assert testUser1.buyTicket(testTicket1)\n assert testUser1.inventory[-1] == testTicket1\n assert not testTicket1.for_sale\n assert testUser1.wallet == 0\n assert testVenue.wallet == old_venue_balance + testTicket1.list_price", "def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def has_money(self) -> bool: \n \n return self.money > 0.0", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def check_risk(self, action, amount=None):\n if amount is None:\n # amount not specified, so determines max amount to trade\n if action == 'buy':\n amount = int((self.upper_bound-self.owned_value)/self.price) # A unit is 1 dollar here? TODO:\n elif action == 'sell':\n amount = int((self.lower_bound-self.owned_value)/self.price)\n else:\n raise ValueError(f\"action should be buy or sell, got {action}\")\n if action == 'buy':\n if self.owned_value + amount <= self.upper_bound:\n # Allowed to buy up to upper bound\n return True, amount\n else:\n # Trying to buy too much\n print(\"Trade not allowed, attempting to increase total amount to more than upper bound.\")\n return False, amount\n elif action == 'sell':\n if self.owned_value + amount >= self.lower_bound:\n # Allowed to buy down to lower_bound\n return True, amount\n else:\n print(\"Trade not allowed, attempting to increase debt to more than lower bound.\")\n return False, amount", "def get_money(self) -> float: \n money = get_owned()\n try:\n assert type(self.owned_init) == float\n except AssertionError: #The first time one tries to make a bet this is evoked\n self.owned_init = money\n finally:\n return money", "def test_order_cost_money(self):\n\t\tself.g.resolve_current_turn()\n\t\tself.assertEqual(self.reload(self.p).money, self.initial_money - BuyInfluenceOrder.BASE_COST)", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n 
debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' % (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def safeWithdrawal(self):\n if self._after_dead_line():\n # each contributor can withdraw the amount they contributed if the goal was not reached\n if not self._funding_goal_reached.get():\n amount = self._balances[self.msg.sender]\n self._balances[self.msg.sender] = 0\n if amount > 0:\n if self.icx.send(self.msg.sender, amount):\n self.FundTransfer(self.msg.sender, amount, False)\n Logger.debug(f'FundTransfer({self.msg.sender}, {amount}, False)', TAG)\n else:\n self._balances[self.msg.sender] = amount\n\n # The sales target has been met. Owner can withdraw the contribution.\n if self._funding_goal_reached.get() and self._addr_beneficiary.get() == self.msg.sender:\n if self.icx.send(self._addr_beneficiary.get(), self._amount_raised.get()):\n self.FundTransfer(self._addr_beneficiary.get(), self._amount_raised.get(), False)\n Logger.debug(f'FundTransfer({self._addr_beneficiary.get()},'\n f'{self._amount_raised.get()}, False)', TAG)\n # reset amount_raised\n self._amount_raised.set(0)\n else:\n # if the transfer to beneficiary fails, unlock contributors balance\n Logger.debug(f'Failed to send to beneficiary!', TAG)\n self._funding_goal_reached.set(False)", "def check_cap(org, amount):\n from django.db.models import Sum, Q\n\n if amount < 0:\n query = Q(favor__lt=0)\n else:\n query = Q(favor__gt=0)\n total = abs(\n org.reputations.filter(query).aggregate(sum=Sum(\"favor\"))[\"sum\"] or 0\n ) + abs(amount)\n mod = org.social_modifier * 5\n if total > mod:\n noun = \"favor\" if amount > 0 else \"disfavor\"\n raise CommandError(\n \"That would bring your total %s to %s, and you can only spend %s.\"\n % (noun, total, mod)\n )", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def test_buyTicket_FreeTicket():\n old_venue_balance = testVenue.wallet\n assert testUser4.buyTicket(testTicket4)\n assert testUser4.inventory[-1] == testTicket4\n assert not testTicket4.for_sale\n assert testUser4.wallet == 0\n assert testVenue.wallet == old_venue_balance", "def restock(self):\n self.money = 9999", "def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True", "def pay_costs(self):\r\n cost = self.cost\r\n if cost:\r\n self.owner.player.char_ob.pay_money(cost)\r\n self.owner.player.msg(\"You pay %s coins for the event.\" % cost)", "def get_money(self, fromobj):\n val, currency = money_from_args(self.args, fromobj)\n if val > currency:\n raise CommandError(\n \"Not enough money. 
You tried to {verb} {val}, but can only {verb} {currency}.\".format(\n verb=self.cmdstring, val=val, currency=currency\n )\n )\n fromobj.pay_money(val, self.caller)\n return val", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def test_balance_too_low(\n self, wait_tx_settled_mock, confirm_mock, do_transfer_mock\n ):\n password_option = self.get_password_args(self.PASSWORD)\n with pytest.raises(\n ClickException,\n match=r\"Balance is not enough! Available=[0-9]+, required=[0-9]+!\",\n ):\n self.invoke(\n \"transfer\",\n self.LEDGER_ID,\n self.get_address(self.LEDGER_ID, self.PASSWORD),\n \"100000\",\n \"100\",\n *password_option,\n )", "def use(self):\n if self.price_of_trip == 0:\n print(\"Sorry your card has been used\")\n else:\n self.price_of_trip -= self.price_of_trip\n print(\"Done\")", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def test_credit(self):\n new_wallet = Wallet.objects.get(name=\"new_wallet\")\n new_wallet.credit(\"100.99\")\n new_wallet_money = Wallet.objects.filter(name=\"new_wallet\")[0].money\n decimal_money_raised = decimal.Decimal(\"100.99\")\n self.assertEqual(new_wallet_money, decimal_money_raised)", "def test_isolate_amount(self):\n self.assertIsNotNone(isolate_amount)", "def test_buyTicket_Valid_Paramaters():\n old_venue_balance = testVenue.wallet\n assert testUser3.buyTicket(testTicket3)\n assert testTicket3 in testUser3.inventory\n assert not testTicket3.for_sale\n assert testUser3.wallet == 950\n assert testVenue.wallet == old_venue_balance + testTicket3.list_price", "def test_create_warranty(self):\n pass", "def check_funds(self, amount):\n if amount > self.get_balance():\n return False\n else:\n return True", "def test_buyTicket_NotForSale():\n old_venue_balance = testVenue.wallet\n assert not testUser2.buyTicket(testTicket2)\n assert testTicket2 not in testUser2.inventory\n assert not testTicket1.for_sale\n assert testUser2.wallet == 500\n assert testVenue.wallet == old_venue_balance", "def transaction_successful(drink_type):\r\n total = 0\r\n cost = MENU[drink_type][\"cost\"]\r\n print(f\" A {drink_type} costs ${MENU[drink_type]['cost']}\")\r\n total += float(input(\" How many quarters? \")) * 0.25\r\n total += float(input(\" How many dimes? \")) * 0.10\r\n total += float(input(\" How many nickels? \")) * 0.05\r\n total += float(input(\" How many pennies? \")) * 0.01\r\n\r\n if total >= cost:\r\n print(f\"Here is ${total - cost} in change.\")\r\n return True\r\n else:\r\n print(\"Sorry that's not enough money. 
Money refunded.\")\r\n return False", "def check_transaction(menu, drink, resources):\r\n customer_money = process_coins()\r\n drink_cost = menu[drink]['cost']\r\n if customer_money < drink_cost:\r\n print(\"Sorry that's not enough money.Money refunded\")\r\n return False\r\n else:\r\n if customer_money > drink_cost:\r\n change = round((customer_money - drink_cost), 2)\r\n print(f\"Here is your ${change} in change\")\r\n resources['Money'] += drink_cost\r\n return True", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "def free(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.total[name] - Resources.available[name]\n Resources.available[name] += amount", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def before_save(self):\n\t\t\n\t\tself.total_debit = 0\n\t\tself.total_credit = 0\n\t\t\n\t\tfor accounting_entry in self.get('accounting_entries'):\n\t\t\tself.total_credit += accounting_entry.credit\n\t\t\tself.total_debit += accounting_entry.debit\n\n\t\tif self.total_credit != self.total_debit:\n\t\t\tfrappe.throw(_('Total credit should be equal to total debit'))", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def take(self, desired_amount):\n if self.amount >= desired_amount:\n grab = desired_amount\n else:\n grab = min(desired_amount, self.amount)\n self.amount -= grab\n print(f\"{self} {self.amount} of supplies left\")\n return grab", "def test_insufficient_funds(self):\n data = {\n 'from_account': self.from_account.id,\n 'to_account': self.to_account.id,\n 'amount': '100.01',\n }\n response = self.client.post(\n self.payments_list_url, data=data, format='json'\n )\n self.assertEqual(\n response.status_code, status.HTTP_400_BAD_REQUEST, response.data\n )\n self.assertIn(\n ERROR_INSUFFICIENT_FUNDS.format(self.from_account.owner),\n response.data['non_field_errors']\n )", "def transaction(money_in, drink_cost):\n if money_in >= drink_cost:\n change = round((money_in - drink_cost), 2) # 2 decimal places\n print(f'your change is £{change}')\n global profit #Global scope needed\n profit = profit + drink_cost\n return True\n else:\n print(\"That's not enough money!\")\n return False", "def test_check_cost():", "def clean_amount(self):\n if 
self.payer_channel == 2: # ignore balance check if not using wallet\n return self.cleaned_data['amount']\n else:\n pay_amount = self.cleaned_data.get('amount')*100\n payer_wallet = Wallet.objects.filter(wallet_id=self.cleaned_data.get('payer_method')).first()\n if payer_wallet is None:\n raise forms.ValidationError(\n self.error_messages['payer wallet unavailable'],\n code='payer wallet unavailable'\n )\n else:\n payer_balance = payer_wallet.balance\n if pay_amount > payer_balance:\n raise forms.ValidationError(\n self.error_messages['no_enough_balance'],\n code='no_enough_balance'\n )\n else:\n return self.cleaned_data['amount']", "async def _bailout_heist(self, ctx, user: discord.Member=None):\r\n author = ctx.message.author\r\n theme = await self.thief.get_guild_theme(ctx.guild)\r\n\r\n t_bail = theme[\"Bail\"]\r\n t_sentence = theme[\"Sentence\"]\r\n\r\n if user is None:\r\n player = author\r\n else:\r\n player = user\r\n\r\n if await self.thief.get_member_status(player) != \"Apprehended\":\r\n return await ctx.send(\"{} is not in jail.\".format(player.display_name))\r\n\r\n cost = await self.thief.get_member_bailcost(player)\r\n if not await bank.get_balance(player) >= cost:\r\n await ctx.send(\"You do not have enough to afford the {} amount.\".format(t_bail))\r\n return\r\n\r\n if player.id == author.id:\r\n msg = (\"Do you want to make a {0} amount? It will cost {1} credits. If you are \"\r\n \"caught again, your next {2} and {0} amount will triple. \"\r\n \"Do you still wish to pay the {0} amount?\".format(t_bail, cost, t_sentence))\r\n else:\r\n msg = (\"You are about pay a {2} amount for {0} and it will cost you {1} credits. \"\r\n \"Are you sure you wish to pay {1} for {0}?\".format(player.name, cost, t_bail))\r\n\r\n await ctx.send(msg)\r\n response = await self.bot.wait_for('MESSAGE', timeout=15, check=lambda x: x.author == author)\r\n\r\n if response is None:\r\n await ctx.send(\"You took too long. canceling transaction.\")\r\n return\r\n\r\n if \"yes\" in response.content.lower():\r\n msg = (\"Congratulations {}, you are free! 
Enjoy your freedom while it \"\r\n \"lasts...\".format(player.display_name))\r\n await bank.withdraw_credits(author, cost)\r\n await self.thief.set_member_free(author)\r\n await self.thief.set_member_oob(author, False)\r\n elif \"no\" in response.content.lower():\r\n msg = \"Canceling transaction.\"\r\n else:\r\n msg = \"Incorrect response, canceling transaction.\"\r\n\r\n await ctx.send(msg)", "def prepare_funding(self):\n entity_miner = self.entities[0]\n\n entity_miner.send_bitcoins(entity_miner.address)\n entity_miner.purchase_mastercoins(500.0)\n\n self.generate_block()\n self.check_balance(entity_miner.address, MSC, '50000.00000000', '0.00000000')\n self.check_balance(entity_miner.address, TMSC, '50000.00000000', '0.00000000')", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be a positive number above 0.')", "def get_money(self, money: float):\n\n assert isinstance(money, float), f\"{money} must be float.\"\n assert money > 0.0, f\"{money} must be a positive number.\"\n assert self.money >= money,(\n f\"There's no enough {money} in the account. \" \n f\"Current money: {self.money}\"\n )\n self.money -= money", "def __check_for_dividends(self) -> None:\n excess = self._excess_to_distribute.get()\n daofund = self._daofund_to_distirbute.get()\n\n Logger.debug(f'Found treasury excess of {excess}.', TAG)\n if excess > 0:\n try:\n Logger.debug(f'Trying to send to ({self._dividends_score.get()}): {excess}.', TAG)\n self.icx.transfer(self._dividends_score.get(), excess)\n self.FundTransfer(self._dividends_score.get(), excess, \"Excess made by games\")\n Logger.debug(f'Sent div score ({self._dividends_score.get()}) {excess}.', TAG)\n self._total_distributed.set(self._total_distributed.get() + excess)\n self._excess_to_distribute.set(0)\n except BaseException as e:\n Logger.debug(f'Send failed. Exception: {e}', TAG)\n revert('Network problem. Excess not sent. 
'\n f'Exception: {e}')\n\n if daofund > 0:\n try:\n self._daofund_to_distirbute.set(0)\n self.icx.transfer(self._daofund_score.get(), daofund)\n self.FundTransfer(self._daofund_score.get(), daofund, \"Excess transerred to daofund\")\n except BaseException as e:\n revert('Network problem. DAOfund not sent. '\n f'Exception: {e}')", "def test_collect_money_handles_excess_funds_over_max_value(self):\n # Params\n f_max_value = 100.00\n f_quarters = 2000\n f_dimes = 1\n f_nickels = 5\n\n # Returns\n return_1 = 'Machine can\\'t hold more than $100.00... Dispensing coins inserted.'\n\n # Calls\n string_1 = collect_money(f_max_value, f_quarters, f_dimes, f_nickels)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def add_to_excess(self) -> None:\n if self.msg.value <= 0:\n revert(\"No amount added to excess\")\n self._treasury_balance.set(self.icx.get_balance(self.address))\n self.FundReceived(self.msg.sender, self.msg.value, f\"{self.msg.value} added to excess\")", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def test_positive_price_details(self):\n with self.client:\n response = self.add_meal(\"beef\", -15000)\n data = json.loads(response.data.decode())\n self.assertEqual(data.get('message'),\n \"Price must be a positive number\")\n self.assertEqual(response.status_code, 400)", "def withdraw(self, amount):\n if amount < 0:\n return \"Amount must be >= 0\"\n elif self._balance < amount:\n return \"Insufficient funds\"\n else:\n self._balance -= amount\n return None", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "async def balance(self, ctx):\n try:\n cash = await ctx.bot.pool.fetchrow(f'select cash from wallet where id={ctx.author.id}')\n\n if cash is None:\n await ctx.bot.pool.execute(f'insert into wallet values ({ctx.author.id}, 0);')\n return await ctx.send('You do not have a wallet yet.')\n\n if cash[0] is None:\n return await ctx.send('You do not have a wallet yet.')\n\n await ctx.send(f'You have {cash[0]} robux.')\n except Exception as e:\n await ctx.send(e)", "def test_deal_sufficient_cards(self):\n cards = self.deck._deal(10)\n self.assertEqual(len(cards), 10)\n self.assertEqual(self.deck.count(), 42)", "def test_account_net_worth_3(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000)\n\n net_worth = account_checking.net_worth(base_asset=asset_usd)\n assert net_worth == 1000", "def _validateSale(self, player: Player, company: PublicCompany, amount: int, kwargs: MutableGameState):\n my_purchases = kwargs.purchases[kwargs.stock_round_count].get(player, [])\n\n my_stock = player.hasStock(company)\n potential_owners = company.potentialPresidents()\n\n validations = [\n err(company not in my_purchases,\n \"You can't 
sell something you already bought: {} {}\",\n company.id, company.short_name),\n\n err(\n my_stock >= amount,\n \"You must have as much stock than you are trying to sell {}\",\n amount\n ),\n\n err(\n company.availableStock(StockPurchaseSource.BANK) + amount <= 60,\n \"You can't sell that much ({}); the bank can only have 50 shares max.\",\n amount\n ),\n\n err(\n len(company.potentialPresidents() - {player}) > 0 or my_stock - amount >= 20,\n \"There are no other potential presidents, so you can't sell your shares. {} / {} (original stock: {})\",\n \",\".join([p.id for p in company.potentialPresidents()]),\n company.name,\n str(company.owners.get(player))\n\n ),\n\n err(amount % STOCK_CERTIFICATE == 0,\n \"You can only sell in units of 10 stocks ({})\".format(amount),\n ),\n\n err(kwargs.stock_round_count > 1,\n \"You can only sell after the first stock round.\")\n ]\n\n return self.validate(validations)", "def balance_money_check():\r\n print(balance_money)", "def resolve_to_fail(self):\n\t\tself.player.money -= self.get_cost()\n\t\tself.player.save()\n\n\t\tself.resolve_failure()\n\t\treturn False", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def test_account_net_worth_4(account_checking, asset_usd):\n deposit(account_checking, asset_usd, 1000, parse_datetime(\"2018-08-30 23:00:00\"))\n\n net_worth = account_checking.net_worth(\n base_asset=asset_usd, evaluated_at=parse_date(\"2018-08-30\")\n )\n assert net_worth == 1000", "def spend_cash(self, num):\r\n self.cash -= num\r\n return self.cash > num", "def deposit_money_check(amt):\r\n global balance_money\r\n print(\"Deposit money is : \", amt)\r\n balance_money = balance_money + amt", "def test_get_damage_out_of_limit(self):\n self.sold.health = 0.2\n self.sold.get_damage(0.32)\n self.assertEqual(self.sold.health, 0)", "def test_bidding_round_handle_transactions(self):\n self.order_1.save()\n self.order_2.save()\n self.order_3.save()\n self.order_4.save()\n self.order_5.save()\n self.order_6.save()\n self.order_7.save()\n self.order_8.save()\n self.order_9.save()\n self.order_10.save()\n self.order_11.save()\n self.order_12.save()\n self.order_13.save()\n\n # =================================================================\n # test: sell order has more stocks then sell-person\n # =================================================================\n\n self.person_2.number_of_stocks = 0\n self.person_2.save()\n\n try:\n self.bidding_round_manager.handle_transactions(bidding_rounds=[self.bidding_round])\n raise AssertionError('ExceedMaxSellSharesException expected')\n except ExceedMaxSellSharesException:\n pass", "def _award_accounts(self):\n\n prize_money = 0\n for i in xrange(len(self.accounts)):\n # Each 
savings account has a 1% chance of quadrupling their principal. The\n # chance is independent between accounts.\n if random.randint(1, 100) == 1:\n prize_money += 3 * self.accounts[i]\n self.accounts[i] *= 4\n return prize_money", "def check_for_offer(self, bid, commodity, limit, actual, quantity, price):\n if bid:\n if len(self.trades[\"buys\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"buys\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"buys\"][commodity])\n\n # if total < limit:\n # #PLACE MORE BIDS.\n return total\n\n else:\n if len(self.trades[\"asks\"][commodity]) == 0:\n return 0\n else: # tally up how much trying to buy.\n total = 0.0\n total_price = 0.0\n for offer in self.trades[\"asks\"][commodity]:\n total += offer.quantity\n total_price += offer.price\n\n avg_price = total_price / len(self.trades[\"asks\"][commodity])\n #\n # if total < limit:\n # #PLACE MORE asks.\n # return total\n # if total < limit:\n # #PLACE MORE asks.\n return total # - limit", "def bet(self, amount):\n if amount >self.budget:\n print 'you cannot bet because of little money'\n else:\n self.bet_amount = amount\n print 'you bet %s' % (amount)", "def give_raise(self, amount=5000):\n self.salary += amount", "def all_in():\r\n\r\n raise_bet(player.get_cash())", "def delete(self):\n existing_balance = self.account.calculated_balance\n\n if not self.active:\n pass\n elif (existing_balance - self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n super().delete()", "def is_resource_sufficient(self, drink):\n can_make = True\n for item in drink.ingredients:\n if drink.ingredients[item] > self.resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n can_make = False\n return can_make", "def test_count_current_money_quantity_in_offers_with_greater_quantity(offer_instances):\n\n offer_purchase_instance = offer_instances[0]\n\n result = check_user_balance(\n user_id=offer_purchase_instance.user.id,\n quantity=get_available_quantity_stocks(offer_id=offer_purchase_instance.id),\n price=100000,\n )\n\n assert result == False", "async def deposit(ctx, money:int):\n author = ctx.message.author\n if str(author) in settings.BOT_ADMIN:\n database.add_pokedollars(author, money)\n await ctx.send(\"funds deposited\")\n else:\n await ctx.send(\"You are not the bot admin. 
Go awai.\")", "def Buy(self, X, Y):\n if self.money - (int(Y) * self.price[X][0] * (1 + self.taxe)) < 0:\n raise TradeError(\"Not Enough Money\")\n self.share[X] += int(Y)\n self.money -= int(Y) * self.price[X][0] * (1 + self.taxe)\n print(f\"BUY:{str(int(Y))}:{str(X)}\", flush = True)", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def _warn_and_lock_if_needed(self, transaction: Transaction) -> None:\n budget = self.budget_manager.get_budget(transaction.budget_category)\n exceeded_ratio = budget.exceeded_ratio\n if exceeded_ratio > 1:\n self._notify_exceeded_budget(budget)\n self._lock_budget(budget)\n self.print_transactions_for_review(budget)\n if self.budget_manager.no_locked_budgets >= 2:\n self._locked = True\n print('YOUR BANK ACCOUNT HAS BEEN LOCKED!')\n elif exceeded_ratio > 0.5:\n self._warn_nearing_exceed_budget(budget, 50)\n self.print_transactions_for_review(budget)", "def test_e2e_order_book_amount_less_than_max_bal(self):\n\n cli = \"--balance 1 offline --test -ob test_data/order_books.csv\"\n deal = self._run_bot_offine(cli)\n\n self.assertAlmostEqual(0.06000734789047485, float(deal.data_row[\"start-qty\"]), 4)\n self.assertEqual(0.002407822109525136, float(deal.data_row[\"result-fact-diff\"]))\n\n # prices from order book\n self.assertNotEqual(float(deal.data_row[\"leg1-price\"]), float(deal.data_row[\"leg1-ob-price\"]))" ]
[ "0.7311504", "0.71432513", "0.6958015", "0.6837109", "0.68072706", "0.6756763", "0.6750037", "0.6689844", "0.66139734", "0.6550322", "0.6531817", "0.65297884", "0.65116364", "0.6493205", "0.64698845", "0.6448786", "0.6435723", "0.64158696", "0.6403044", "0.6386391", "0.637806", "0.6367301", "0.6363629", "0.63480663", "0.6330774", "0.63027394", "0.62940073", "0.62726486", "0.62241006", "0.6206649", "0.62053514", "0.6204239", "0.61787593", "0.6161867", "0.61595154", "0.61536986", "0.61105114", "0.60786915", "0.60726553", "0.6059603", "0.60577035", "0.6054566", "0.604287", "0.60317373", "0.6023612", "0.6021611", "0.600755", "0.6002556", "0.6002274", "0.5992249", "0.5990113", "0.5989351", "0.5988149", "0.5987271", "0.5983391", "0.59811586", "0.5978757", "0.5968703", "0.5967311", "0.59666026", "0.5960688", "0.5959439", "0.59581155", "0.5956115", "0.59524935", "0.59521574", "0.59383166", "0.5938057", "0.5935934", "0.5934481", "0.5932357", "0.59314567", "0.5919402", "0.5899565", "0.58874613", "0.58726263", "0.5864868", "0.5864082", "0.5851269", "0.58496684", "0.58453006", "0.5842775", "0.5833519", "0.5833262", "0.5824071", "0.5818609", "0.58168656", "0.58160996", "0.580957", "0.58094066", "0.5806917", "0.5804129", "0.5797667", "0.57925624", "0.5789356", "0.5780064", "0.5776925", "0.5776523", "0.5775649", "0.57755667", "0.57751644" ]
0.0
-1
If the last person passes, you should assign the new owner asap.
def testAutopurchaseOnLastPass(self):
    _pass_move = self._pass_move()
    context = self._context()
    bfpc = BiddingForPrivateCompany()

    self.assertTrue(bfpc.run(_pass_move, context), bfpc.errors())
    self.assertEqual(_pass_move.move_type, BidType.PASS)
    self.assertEqual(len(context.private_companies[1].passed_by), 1)
    self.assertTrue(context.private_companies[1].hasOwner())
    self.assertNotEqual(
        context.private_companies[1].belongs_to,
        _pass_move.player,
        context.private_companies[1].belongs_to.id
    )
    self.assertEqual(
        context.private_companies[1].belongs_to,
        context.players[1],
        context.private_companies[1].belongs_to.id
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testOwnershipAfterCreate(self):\n self.simulateATGUIInteraction(task='create')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def add_to(self, newowner):\n self.prevai = newowner.ai\n newowner.ai = self", "def possessed_by(self, other):\r\n self.owner = other", "def renounceOwnership():\n\n assert msg.sender == self.owner, \"Access is denied.\"\n\n log.OwnershipRenounced(msg.sender)\n self.owner = ZERO_ADDRESS", "def test_add_account(self):\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n # Add an account and confirm that the Person passed as owner is\n # updated.\n account1 = Account(owner=person1)\n account2 = Account(owner=person1)\n self.assertEqual(person1.accounts, {account1, account2})\n self.assertEqual(person2.accounts, set())", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def testOwnershipAfterEdit(self):\n self.simulateATGUIInteraction(task='edit')\n self.failUnlessEqual(self.person.getOwnerTuple()[1], 'abc123')", "def visit_take(self, take):\n new_owner_id = self.event_json['new_owner']['id']\n new_owner = self.world.entities[new_owner_id]\n take.new_owner = new_owner", "def test_transfer_new_inherited_owner(self):\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_owners(inherited_only=True)[0].user,\n self.user_owner_cat,\n )\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_owner_cat.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner_cat)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)\n self.assertEqual(\n self.project.get_role(self.user_owner_cat),\n RoleAssignment.objects.get(\n project=self.project,\n user=self.user_owner_cat,\n role=self.role_owner,\n ),\n )", "def update_owner(current_owner_email: str, new_owner_email: str):\n current_owner_id = find_user_id(current_owner_email)\n new_owner_id = find_user_id(new_owner_email) \n \n \"\"\" This block is executed to check if email addresses provided are associated with two Looker users \"\"\"\n \n if type(new_owner_id) != int and type(new_owner_id) != int:\n print(\"The email addresses for both the current owner and the new owner are not associated with any Looker user id\")\n\n elif type(current_owner_id) != int: \n print(\"The email address for the current owner is not associated with any Looker user id\")\n\n elif type(new_owner_id) != int:\n print(\"The email address for the new owner is not associated with any Looker user id\")\n\n else: \n body = {}\n body['user_id'] = new_owner_id\n find = find_schedules(current_owner_id) \n for i in find.values(): \n sdk.update_scheduled_plan(i,body)\n print(\"Successfully transfer all schedules of \" + current_owner_email + \" to \" + new_owner_email)", "def _testAssistantOwnershipAfter(self, person=None, task='create'):\n if not person:\n person = self.person\n \n newperson = self.getPerson(id='def456', firstName=\"Test\", lastName=\"Assistant\")\n 
person.setAssistants([newperson.UID(),])\n self.simulateATGUIInteraction(person=person, task=task)\n owners = person.users_with_local_role('Owner')\n \n return 'def456' in owners", "def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))", "def test_transfer_new_inherited_member(self):\n self.make_assignment(\n self.category, self.user_new, self.role_contributor\n )\n self.assertEqual(\n self.project.get_owners(inherited_only=True)[0].user,\n self.user_owner_cat,\n )\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_new.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_new)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)", "def transferOwnership(_newOwner: address):\n assert msg.sender == self.owner, \"Access is denied.\"\n assert _newOwner != ZERO_ADDRESS, \"Invalid owner supplied.\"\n\n log.OwnershipTransferred(msg.sender, _newOwner)\n self.owner = _newOwner", "def test_transfer_old_inherited_owner(self):\n self.owner_as_cat.user = self.user_owner\n self.owner_as_cat.save()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_OWNER,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_guest)\n self.assertIsNone(\n RoleAssignment.objects.filter(\n project=self.project, user=self.user_owner\n ).first()\n )\n self.assertEqual(\n self.project.get_role(self.user_owner), self.owner_as_cat\n )\n self.assertEqual(self.owner_as.role, self.role_owner)", "def test_transfer_old_inherited_member(self):\n self.make_assignment(\n self.category, self.user_owner, self.role_contributor\n )\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_guest)\n self.owner_as.refresh_from_db()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n self.assertEqual(self.owner_as.role, self.role_contributor)", "def transfer_ownership(self, user):\n new_owner = get_user_model().objects.filter(is_active=True) \\\n .get(pk=user.pk)\n self.owner = new_owner", "def unorphaned(self):\n return self.new_owner == self.user", "def pre_save(self, obj):\n obj.owner = self.request.user", "def testAssistantOwnershipAfterCreate(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='create'), \"designated assistant is not listed as an owner\")", "def save_model(self, request, obj, 
form, change):\n try:\n owner = form.instance.owner\n except models.Application.owner.RelatedObjectDoesNotExist:\n form.instance.owner = request.user\n\n super().save_model(request, obj, form, change)", "def test__put_owner_into():\n user = User.precreate(202211270016)\n team = Team.precreate(202211270017)\n \n for input_value, defaults, expected_output in (\n (ZEROUSER, False, {}),\n (ZEROUSER, True, {'owner': None, 'team': None}),\n (user, True, {'owner': user.to_data(defaults = True, include_internals = True), 'team': None}),\n (team, True, {'owner': team.to_data_user(), 'team': team.to_data(defaults = True, include_internals = True)}),\n ):\n output = put_owner_into(input_value, {}, defaults)\n vampytest.assert_eq(output, expected_output)", "def _add_owner(parent_id, child_id):\n db.session.add(\n pam.BivAccess(\n source_biv_id=parent_id,\n target_biv_id=child_id\n )\n )", "def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)", "def test_update_owner(cards_db):\n i = cards_db.add_card(Card(\"foo\", owner=\"me\"))\n cards_db.update_card(i, Card(owner=\"not me\", state=None))\n\n mod = cards_db.get_card(i)\n assert mod == Card(\"foo\", owner=\"not me\")", "def testAssistantOwnershipAfterEdit(self):\n self.failUnless(self._testAssistantOwnershipAfter(task='edit'), \"designated assistant is not listed as an owner\")", "def _assign(request, obj, person_id):\n try:\n if request.method == \"POST\":\n person_id = request.POST.get('person_1', None)\n\n if person_id is None:\n obj.assigned_to = None\n else:\n person = Person.objects.get(pk=person_id)\n obj.assigned_to = person\n\n obj.save()\n\n except Person.DoesNotExist:\n raise Http404(\"No person found matching the query.\")", "def _init_owners(self, identity, record, **kwargs):\n # if the given identity is that of a user, we add the\n # corresponding user to the owners (record.access.owned_by)\n is_sys_id = system_process in identity.provides\n if not record.access.owned_by and not is_sys_id:\n record.access.owned_by.add({\"user\": identity.id})", "def mark_complete(self, winner):\n # print(\"Run mark_complete\")\n # Game.objects.get(pk=self.pk).update(winner=winner, completed=True)\n self.winner = winner\n self.completed = True\n self.save(update_fields=['winner', 'completed'])\n if (winner == self.creator):\n self.creator.profile.add_win_match()\n self.opponent.profile.add_lose_match()\n else:\n self.opponent.profile.add_win_match()\n self.creator.profile.add_lose_match()", "def test_patch_project_owner(self):\n new_owner = self.make_user('new_owner')\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'owner': str(new_owner.sodar_uuid)}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def reallocate_person(self, person, new_room):\n pass", "def test_validate_owner(self):\n with self.assertRaises(ValidationError):\n self.make_assignment(self.category, self.user_bob, self.role_owner)", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def form_valid(self, form):\n form.instance.founder = self.request.user\n print('Project Create user:', self.request.user)\n form.save()\n\n tc_lib.generate_user_matches(form)\n\n return super(ProjectCreate, self).form_valid(form)", "def test_model_can_create_a_person(self):\n old_count 
= People.objects.count()\n self.actor.save()\n new_count = People.objects.count()\n self.assertNotEqual(old_count, new_count)", "def test_channel_addowner_owner_flockr():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid", "def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node", "def test_channel_addowner_already_an_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n with pytest.raises(InputError):\n assert channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def approve_person(message, target):\n users = hf.get_users()\n if target == 'me':\n return\n for user in users:\n if user[\"name\"] == target:\n approver = message._get_user_id()\n admins = hf.get_admins()\n for admin in admins:\n if admin[\"id\"] == approver:\n if user is not None:\n if user[\"approval_level\"] == \"unapproved\":\n message.reply(\"Approved user: <@{}>\".format(target))\n user[\"approval_level\"] = \"approved\"\n hf.save_users(users)\n return\n elif user[\"approval_level\"] == \"denied\":\n message.reply(Strings['MARKED_DENIED'])\n return\n else:\n message.reply(\":x: {} is already: {}.\".format(target,\n user[\"approval_level\"]))\n return\n else:\n message.reply(Strings['USER_NOT_FOUND'].format(target))\n return\n\n message.reply(Strings['CANT_APPROVE'])", "def test_channel_addowner_owner_flockr_not_member():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with 
pytest.raises(AccessError):\n assert channel_addowner(register_first_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])", "def test_put_owner(self):\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_control_acl_new_people_update(self):\n person = factories.PersonFactory()\n add_person_global_role(person, 'Creator')\n with factories.single_commit():\n control = factories.ControlFactory()\n control.add_person_with_role_name(person, \"Admin\")\n access_control_list = {\n \"Admin\": [\n {\n \"email\": person.email,\n \"name\": person.name,\n }\n ],\n \"Principal Assignees\": [\n {\n \"email\": person.email,\n \"name\": person.name,\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"user2\",\n },\n {\n \"email\": \"[email protected]\",\n \"name\": \"user3\",\n },\n ]\n }\n\n response = self.api.put(control, control.id, {\n \"access_control_list\": access_control_list,\n })\n\n self.assert200(response)\n for expected_person in access_control_list[\"Admin\"]:\n user = all_models.Person.query.filter_by(\n email=expected_person[\"email\"]\n ).one()\n self.assertEqual(user.name, expected_person[\"name\"])\n self.assertEqual([ur.role.name for ur in user.user_roles], [\"Creator\"])\n control = all_models.Control.query.get(control.id)\n self.assert_obj_acl(control, access_control_list)", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def form_valid(self, form):\n self.handle_balance_update(form)\n\n form.instance.owner = self.request.user\n return super().form_valid(form)", "def save_model(self, request, obj, form, change):\n if not change:\n obj.creator = request.user\n obj.save()", "def manage_changeOwnershipType(\n self,\n explicit=1,\n RESPONSE=None,\n REQUEST=None\n ):\n old = getattr(self, '_owner', None)\n if explicit:\n if old is not None:\n return\n owner = self.getOwnerTuple()\n if owner is not None and owner is not UnownableOwner:\n self._owner = owner\n else:\n if old is None:\n return\n new = aq_get(aq_parent(self), '_owner', None, 1)\n _m = object()\n if old is new and (self.__dict__.get('_owner', _m) is not _m):\n del self._owner\n\n if RESPONSE is not None:\n RESPONSE.redirect(REQUEST['HTTP_REFERER'])", "def is_still_owner(self):\n raise tooz.NotImplemented", "def attempt_to_acquire_leader(self, permanent=False):", "def enter_room(self, user):\n if self.user1 is None and self.user2 != user:\n self.user1 = user\n elif self.user1 != user and self.user2 is None:\n self.user2 = user\n else:\n return None\n self.save()\n return True", "def manage_owners():\n\n owner_data = request.get_json(force=True)\n return _get_owner_service().create_owner(owner_data)", "def set_owner(self, owner):\n self.__owner = owner", "def assign_store_owner(user_name, new_store_owner_name, store_name):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.ADD_OWNER.value, store_name)\n 
permission_handler.assign_store_employee(action.OWNER_INITIAL_PERMISSSIONS,\n new_store_owner_name,\n store_name)\n user_handler.assign_store_employee(user_name, new_store_owner_name, store_name)\n publisher.subscribe(new_store_owner_name, store_name)", "def update_role(self):\n all_leader = []\n user_records = self.info\n per = Persons()\n for record in user_records:\n if record['leader'] not in all_leader:\n all_leader.append(record['leader'])\n # print len(all_leader)\n # print all_leader\n for leader in all_leader:\n # print leader\n fil = per.get_one({'dn':leader})\n # print fil\n if fil is None:\n print 'this leader %s is not in our db,please check' % leader\n else:\n per.update_one({'dn':leader},{'role':'leader'})", "def is_owner(self, is_owner):\n\n self._is_owner = is_owner", "def test_init_spouse(self):\n # Add a spouse and confirm that both Person objects are updated\n person1 = self.owner\n person2 = Person(\n self.initial_year, \"Spouse\", self.initial_year - 20,\n retirement_date=self.retirement_date,\n gross_income=Money(50000),\n spouse=person1, tax_treatment=self.tax_treatment)\n self.assertEqual(person1.spouse, person2)\n self.assertEqual(person2.spouse, person1)", "def create_person(self):", "def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)", "def test_store_saves_owner(self):\n self.stack = stack.Stack(self.ctx, 'owner_stack', self.tmpl)\n stack_ownee = stack.Stack(self.ctx, 'ownee_stack', self.tmpl,\n owner_id=self.stack.id)\n stack_ownee.store()\n db_stack = stack_object.Stack.get_by_id(self.ctx, stack_ownee.id)\n self.assertEqual(self.stack.id, db_stack.owner_id)", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner", "def test_handle_force_assign(self):\r\n self.mock_facade.retrieve.return_value = Project(\"GTID\", [])\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name -f\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def test_ownership(Ebb):\n assert Ebb.getOwner() == accounts[0]\n with pytest.reverts():\n Ebb.transferOwnership(ZERO_ADDRESS, {\"from\": accounts[0]})\n\n Ebb.transferOwnership(accounts[1], {\"from\": accounts[0]})\n assert Ebb.getOwner() == accounts[1]", "def test_transfer_old_inherited_owner_demote(self):\n self.owner_as_cat.user = self.user_owner\n self.owner_as_cat.save()\n self.assertEqual(self.project.get_role(self.user_owner), self.owner_as)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_guest.username,\n 'old_owner_role': PROJECT_ROLE_DELEGATE,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n self.assertEqual(\n self.project.get_role(self.user_guest).role, self.role_guest\n )", "def test_channel_addowner_not_owner():\n clear()\n auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 
'Citizen')\n register_forth_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n with pytest.raises(AccessError):\n assert channel_addowner(register_third_result['token'], randChannel_id['channel_id'], register_forth_result['u_id'])", "def add_person(self, name, email, typ, wants_accomodation='N'):\n if typ == \"FELLOW\":\n if not email in self.all_persons.keys():\n new_fellow = Fellow(name, email, wants_accomodation)\n self.fellows[email] = new_fellow\n self.allocate_room(new_fellow)\n return new_fellow\n else:\n return \"Email already used!\"\n elif typ == \"STAFF\":\n if not email in self.all_persons.keys():\n new_staff = Staff(name, email)\n self.staff[email] = new_staff\n self.allocate_room(new_staff)\n return new_staff\n else:\n return \"Email already used!\"\n else:\n return -1", "def _assert_on_device_owner_change(self, port_data, orig_dev_own):\n if orig_dev_own == constants.DEVICE_OWNER_LOADBALANCERV2:\n if (\"allowed_address_pairs\" in port_data and\n port_data[\"allowed_address_pairs\"]):\n msg = _('Loadbalancer port can not be updated '\n 'with address pairs')\n raise n_exc.InvalidInput(error_message=msg)\n\n if 'device_owner' not in port_data:\n return\n new_dev_own = port_data['device_owner']\n if new_dev_own == orig_dev_own:\n return\n\n err_msg = (_(\"Changing port device owner '%(orig)s' to '%(new)s' is \"\n \"not allowed\") % {'orig': orig_dev_own,\n 'new': new_dev_own})\n\n # Do not allow changing nova <-> neutron device owners\n if ((orig_dev_own.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) and\n new_dev_own.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)) or\n (orig_dev_own.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX) and\n new_dev_own.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX))):\n raise n_exc.InvalidInput(error_message=err_msg)\n\n # Do not allow removing the device owner in some cases\n if orig_dev_own == constants.DEVICE_OWNER_DHCP:\n raise n_exc.InvalidInput(error_message=err_msg)", "def save(self, **kwargs):\n owner = str(self.vhost.domain.owner())\n if not self.name.startswith(owner + '_'):\n self.name = owner + '_' + self.name\n try:\n super(Account, self).save(**kwargs)\n except IntegrityError:\n i = 1\n base_name = self.name\n while True:\n self.name = base_name + '-' + str(i)\n try:\n super(Account, self).save(**kwargs)\n return\n except IntegrityError:\n i += 1", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id", "def which_owner(self):\n LOGGER.debug(self.details)\n for override_function, override_map in self.team_owner_overrides.items():\n for override_key, override_team in override_map.items():\n if override_key in self.details[override_function]: # pylint: disable=unsupported-membership-test\n self.details[\"owner\"] = override_team\n break\n\n # for chassis and blades in d42, use their role to determine ownership\n if re.match('^c[0-9]*b[0-9]*', self.details['function']) is not None:\n self.load_from_device42()\n\n if self.details[\"owner\"] == \"team-unclassified\":\n for owner, teamregex in self.team_ownership_regexes.items():\n if re.search(teamregex, self.details[\"function\"]):\n self.details[\"owner\"] = owner\n break", "def test_assigning_when_different_country(self):\n asset = BOAssetFactory(**{'owner': self.owner})\n old_hostname = asset.hostname\n self.assertNotIn(self.owner_country_name, asset.hostname)\n 
asset._try_assign_hostname(True)\n self.assertNotEqual(asset.hostname, old_hostname)\n self.assertIn(self.owner_country_name, asset.hostname)", "def test_transfer_inherit_equal(self):\n self.make_assignment(self.project, self.user_new, self.role_contributor)\n # Set category role for project owner\n self.make_assignment(\n self.category, self.user_owner, self.role_contributor\n )\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_new.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_new)\n self.assertEqual(\n self.project.get_role(self.user_owner).role, self.role_contributor\n )", "def test_transfer(self):\n # Assign role to new user\n self.make_assignment(self.project, self.user_new, self.role_contributor)\n self.assertEqual(self.project.get_owner().user, self.user_owner)\n url = reverse(\n 'projectroles:api_role_owner_transfer',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'new_owner': self.user_new.username,\n 'old_owner_role': PROJECT_ROLE_CONTRIBUTOR,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n self.assertEqual(self.project.get_owner().user, self.user_new)", "def test_assign_managing_team(self):\n pass", "def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner", "def assign(self, assignee, created_by, unit):\n assignment = ReferralAssignment.objects.create(\n assignee=assignee,\n created_by=created_by,\n referral=self,\n unit=unit,\n )\n ReferralActivity.objects.create(\n actor=created_by,\n verb=ReferralActivityVerb.ASSIGNED,\n referral=self,\n item_content_object=assignee,\n )\n # Notify the assignee by sending them an email\n Mailer.send_referral_assigned(\n referral=self,\n assignment=assignment,\n assigned_by=created_by,\n )\n\n if self.state in [ReferralState.IN_VALIDATION, ReferralState.PROCESSING]:\n return self.state\n\n return ReferralState.ASSIGNED", "def test_auto_assign_one(self):\n shift1 = RegularWorkshift.objects.create(\n workshift_type=self.wtype1,\n pool=self.p1,\n hours=5,\n )\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)\n self.assertIn(self.profile, shift1.current_assignees.all())\n\n instances = WorkshiftInstance.objects.filter(weekly_workshift=shift1)\n self.assertGreater(instances.count(), 0)\n self.assertTrue(all(\n instance.workshifter == self.profile\n for instance in instances\n ))\n\n pool_hours = self.profile.pool_hours.get(pool=self.p1)\n self.assertEqual(\n pool_hours.assigned_hours,\n pool_hours.hours,\n )", "def originalownerpe(self) :\n\t\ttry :\n\t\t\treturn self._originalownerpe\n\t\texcept Exception as e:\n\t\t\traise e", "def add_person(self, per: str):\n if per not in self._people:\n self._people.append(per)\n else:\n raise 
IDAlreadyExists", "def save(self, *args, **kwargs):\n\n self._set_first_initial()\n self._set_user()\n super(AbstractHuman, self).save(*args, **kwargs)", "def owner(self):\n if self.get_team():\n return self.get_team()\n return None", "def take_control_over(self, other):\n a = self\n if a == other: return\n if util.onechancein(6): #make a master of b\n if other.master is not None:\n if other.master != a and a.master != other: #if b already had master, make a enemy of b.master\n a.history.append('In year %d %s tried to overtake the control over %s, but failed' % (world.year, a.name, other.name))\n other.master.conflict_with(a)\n else:\n if a.master == other: #if we overtook controll\n a.master = None\n try:\n other.minions.remove(a)\n except ValueError: pass\n try:\n other.master.minions.remove(other)\n except Exception : pass\n a.minions.append(other)\n other.master = a\n a.history.append('In year %d %s became boss over %s' %(world.year, a.name, other.name))", "def owner(self, owner: str):\n\n self._owner = owner", "def test_asset_assignee_is_created_when_a_user_is_saved(self):\n user = User.objects.create(\n email=\"[email protected]\", cohort=10, password=\"devpassword\"\n )\n self.assertEqual(len(AssetAssignee.objects.filter(user=user)), 1)", "def replace_person(self,p):\n p.age = 15\n p.days = 0\n \n self.age_group[4].remove(p.identifier)\n self.age_group[0].add(p.identifier)\n \n if np.random.random() < self.sexual_activity_high:\n p.sexual_activity = 1\n self.high_sexual_activity.add(p.identifier)\n else:\n p.sexual_activity = 0\n \n p.cure(self)\n \n #remove all partnerships where p is involved in\n for i,ps in enumerate(self.partnerships):\n if p.identifier in [ps.persons[0].identifier,ps.persons[1].identifier]:\n ps.remove(self)\n self.partnerships[i] = None\n #if deleting steady partnership\n if ps.type == 1:\n self.number_of_steady -= 1\n self.partnerships = list(filter(None,self.partnerships))", "def pay_amount_to_owner(self, contract: 'cn.ContractCancellation'):\n owner1_new_balance_delta = contract.amount_in_msat\n if self.is_owner1(contract.payee):\n self._owner2_htlc_locked_setter(int(self._owner2_htlc_locked - contract.amount_in_msat))\n else:\n self._owner1_htlc_locked_setter(int(self._owner1_htlc_locked - contract.amount_in_msat))\n owner1_new_balance_delta = -contract.amount_in_msat\n self._update_message_state(self._state.message_state.owner1_balance + owner1_new_balance_delta)\n contract.invalidate()\n self._state.htlc_contracts.remove(contract)", "def _update_leader(self):", "def create_entity_owner(self, owner_data):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_CREATE_ENTITY_OWNER, owner_data)", "def owner_id(self, owner_id):\n self._owner_id = owner_id", "def test_post_owner(self):\n self.client.force_authenticate(self.user)\n response = self.post(content='foo')\n self.assertEqual(response.data['owner'], self.user.pk)", "def _test_pre_fill_and_assign_humor(self):\n for i in range(1, 50):\n User.objects.create_user(username=\"u{0}\".format(i))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n manager_shifts = RegularWorkshift.objects.filter(\n pool=self.p1, workshift_type__auto_assign=False,\n )\n profiles = WorkshiftProfile.objects.all()\n for profile, shift in zip(profiles, manager_shifts):\n shift.current_assignees.add(profile)\n shift.save()\n unfinished = utils.auto_assign_shifts(\n self.semester, pool=WorkshiftPool.objects.get(title=\"Humor 
Shift\")\n )\n self.assertEqual([], unfinished)", "def random_assign(self, person, room_set):\n random_room = self.random_select(room_set)\n while room_set[random_room]['room'].allocate_room_space() == -1:\n random_room = self.random_select(room_set) # pragma: no cover\n if self.all_rooms[random_room]['room'].room_type == \"LivingSpace\":\n person.set_livingspace(\n self.living_spaces[random_room]['room'].name)\n occupant = person.name + \"\\t\" + person.email\n room_set[random_room]['occupants'].append(occupant)\n elif self.all_rooms[random_room]['room'].room_type == \"OfficeSpace\":\n occupant = person.name + \"\\t\" + person.email\n person.set_office(self.offices[random_room]['room'].name)\n room_set[random_room]['occupants'].append(occupant)", "def add_candidate(self, user):\n weight = (\n self.assignment_related_users.aggregate(models.Max(\"weight\"))[\"weight__max\"]\n or 0\n )\n defaults = {\"weight\": weight + 1}\n self.assignment_related_users.update_or_create(user=user, defaults=defaults)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_channel_removeowner_last_owner():\n clear()\n register_first_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n register_second_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n randChannel_id = channels_create(register_second_result['token'], 'Random Channel', True)\n channel_join(register_first_result['token'], randChannel_id['channel_id'])\n #register_third_result = auth_register('[email protected]', 'password1234', 'Jane', 'Citizen')\n #channel_addowner(register_second_result['token'], randChannel_id['channel_id'], register_third_result['u_id'])\n # removing third user\n channel_removeowner(register_first_result['token'], randChannel_id['channel_id'], register_second_result['u_id'])" ]
[ "0.677503", "0.64477986", "0.6446304", "0.63864005", "0.6358", "0.62971616", "0.62954915", "0.6290303", "0.6241448", "0.6205976", "0.6128244", "0.6090834", "0.6000378", "0.5993534", "0.5971924", "0.594644", "0.5895443", "0.5857133", "0.5824147", "0.58180535", "0.58094245", "0.58030957", "0.5764741", "0.57531774", "0.5744056", "0.57248", "0.57219946", "0.57205", "0.57002306", "0.56992656", "0.5694449", "0.56930006", "0.5651175", "0.5651175", "0.5651175", "0.5651175", "0.5636407", "0.56270343", "0.5625853", "0.561571", "0.5610568", "0.5608957", "0.5604054", "0.55978876", "0.5593893", "0.558319", "0.55648386", "0.55563986", "0.55550927", "0.5555065", "0.5544797", "0.5536973", "0.5523115", "0.55174327", "0.549145", "0.54835874", "0.5479443", "0.54740846", "0.5471468", "0.54705185", "0.5463943", "0.5459681", "0.5443626", "0.5443626", "0.5443626", "0.5443626", "0.54404503", "0.5438907", "0.5438423", "0.54302174", "0.542979", "0.54295504", "0.53957915", "0.53948975", "0.5394643", "0.53939235", "0.53850114", "0.5383074", "0.53783447", "0.53762084", "0.5366848", "0.53622055", "0.53579926", "0.535482", "0.53543717", "0.53520346", "0.5339947", "0.5335777", "0.5333776", "0.5333338", "0.5328692", "0.5326376", "0.5321916", "0.5321011", "0.53206766", "0.53194785", "0.5317788", "0.53127754", "0.531005", "0.5304369", "0.52852124" ]
0.0
-1
Get the identifier of person
def identifier(self): return self._identifier
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_identifier(self):", "def getIdent (self) :\n return self.id", "def get_identifier(self) -> str:\n return self.identifier", "def get_person_id(person_data):\n person_ref = person_data['Casualty_Reference']\n veh_ref = person_data['Vehicle_Reference']\n acc_id = get_acc_id_from_data(person_data)\n person_id = common.get_gb_person_id(acc_id, int(veh_ref), int(person_ref))\n return person_id", "def get_identifier_string(self):\n return self.identifier", "def getID():", "def identifier(self):\r\n return self.id", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def getId(self):\n return self.identifier", "def identifier(self):\n return self.contact.identifier", "def identifier(self):\n return self.__id", "def identifier(self):\n return self._id", "def get_ident():\n return -1", "def getId(self):\n return self.getUserName()", "def getId(self):\n return self.getUserName()", "def getIdentification(self):\r\n self._update('getIdentification')\r\n return self.supervisord.options.identifier", "def get_id(self):\n return self.name", "def identifier(self) -> str:\n return self.doc['id']", "def identifier(self) -> Optional[str]:\n return pulumi.get(self, \"identifier\")", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def id(self):\n\t\tif self._record is not None:\n\t\t return self._record.id\n\t\telse:\n\t\t return \"NO_ID_ASSIGNED\"", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def Ientifier(self, default=None):\n return self.data.get('identifier', default)", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def get_id(self, refobj):\n return cmds.getAttr(\"%s.identifier\" % refobj)", "def identifier(self) -> str:\n return self._identifier", "def get_id(self):\r\n return self.username", "def get_identity(self):\n return self.query_serial('*IDN?')", "def ident(self):\r\n return self.component.get(\"ID\", \"\")", "def identifier(self):\n\n return self._identifier", "def get_id(self): \n\t\treturn (self.user_id)", "def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier", "def getID(self) -> int:\n ...", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def _get_identifier(self):\n\n if '_identifier' not in self.__dict__:\n\n object_or_string, args, kwargs = self._init_args\n\n # Get the 
identifier for the wrapped object, e.g. 'auth.user.1234'\n # If there is a lookup in the kwargs, then the following call\n # will figure out the object_pk. It caches these lookups.\n kwargs['_fail_silently'] = self._fail_silently\n self.__dict__['_identifier'] = get_identifier(object_or_string, *args, **kwargs)\n\n return self.__dict__['_identifier']", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def getIdentity():\n return Sentience.__IDENTITY.lower()", "def identity(self):\n return self.id", "def identifier(self):\n\n return self.name", "def get_person(self, requestId):\n return self.get_json('/verification/%s/person' % str(requestId))", "def identifier(self):", "def identity(self) -> str:\n return self.requester.uuid", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id", "def get_identifier(self, identifier_type):\n if identifier_type == 'ID':\n retval = self._identity\n elif identifier_type == 'Title':\n retval = self._title\n else:\n raise ValueError('identifier_type is neither \\'ID\\' nor \\'Title\\'')\n return retval", "def identity_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"identity_id\")", "def person_name(self):\n return self._person_name", "def identifier(self):\n raise NotImplementedError", "def _get_id(self):\n return self.id", "def _extract_identifier(self, publication):\n return self._parse_identifier(publication.metadata.identifier)", "def identifier(self):\n return \"{}: {}\".format(self.id, self.name)" ]
[ "0.75057507", "0.7319578", "0.7309798", "0.7292267", "0.71864873", "0.7161316", "0.70634836", "0.6956172", "0.6956172", "0.6955756", "0.6924296", "0.6895861", "0.68554884", "0.68335015", "0.6826491", "0.6826491", "0.68212664", "0.68004686", "0.67956805", "0.67936885", "0.6743529", "0.6743529", "0.6743529", "0.6743529", "0.66905", "0.66871214", "0.66871214", "0.66871214", "0.6684929", "0.66819704", "0.66819704", "0.6644955", "0.6638075", "0.66338414", "0.6626651", "0.65884423", "0.65781385", "0.6566367", "0.65485674", "0.6536405", "0.6531147", "0.6527054", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.65232366", "0.6518977", "0.65141046", "0.651019", "0.6506063", "0.65010864", "0.6500793", "0.649555", "0.6486213", "0.64833635", "0.6482954", "0.6476852", "0.64537925", "0.6453119", "0.64514196" ]
0.68210834
21
Get the name of person
def name(self): return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def person_name(self):\n return self._person_name", "def persona_name(self) -> str:\n return self._persona_name", "def get_name() -> str:", "def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def get_name():\n\n return character['Name']", "def get_name() -> str:\n pass", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def _get_name(self):\n return self.name", "def get_name():", "def get_name(self) -> str:\r\n return self.name", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def nombre(self):\n return self.persona.nombre", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name():\n return \"Boss\"", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self) -> str:\n return self.name", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def get_name(self): \r\n return self.name", "def get_name(self):\r\n return self.name", "def get_name(self):\n\n return self.name", "def get_name(self):\n return self.name # return the name", "def get_name(self):\n return self.name # return the name", "def get_name(self) -> str:\n pass", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self):\n return self.name", "def get_name(self) -> str:\n\n return self.name_", "def get_name(self):\n return self.attributes[\"name\"]", "def get_name_of_person_by_id(self, person_id):\r\n try:\r\n person_id = int(person_id)\r\n except ValueError:\r\n raise PersonIDException(\"Error! The person ID has to be a positive integer!\")\r\n if person_id <= 0:\r\n raise PersonIDException(\"Error! The person ID has to be a positive integer!\")\r\n\r\n found_person, _ = self.find_person_by_id(person_id)\r\n if found_person is None:\r\n raise PersonIDException(f\"Error! 
There is no person with ID '{person_id}' registered.\")\r\n return found_person.name", "def get_name(self):\n return self.name", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def GetName(self):\n if self.compound.preferred_name:\n return self.compound.preferred_name\n if self._name:\n return self._name\n return str(self.compound.FirstName())", "def get_given_name(self):\n return self.given_name", "def get_name(self):\n return self._name", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def get_name(self):\n\n return ri.RhinoInput(self.last).get_name()", "def get_name(self):\n return self.name", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_name(self):\n try:\n return self.profile_data[\"name\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve player name: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)", "def get_name(self):\r\n return self._name", "def get_name(self):\r\n return self._name", "def get_name(self):\n if self.name != None: return self.name\n else: return self.get_name_from_items(self.items.values())", "def get_name(self, ):\n return self.get_parameter('name')", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')", "def get_name(self) -> str:\n return self._name", "def get_name(self) -> str:\n return self._name", "def get_full_name(self):\n\n return self.name", "def get_full_name(self):\n return self.name #self is base and it hits name filed", "def get_display_name(member):\n if member.nick is None:\n name = member.name\n else:\n name = member.nick\n if User.objects.get(id=member.id).is_ironman:\n name += ' (IM)'\n return name", "def get_name(self):\n\n return self.name", "def get_name(self):\n\n return self.name", "def get_name(self):\n return self.normalize_name(self.name)", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def get_full_name(self):\n return self.name+self.last_name", "def get_name(self):\n return self._g.get_name()", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def get_name(self):\n return self._name", "def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name", "def get_name(self):\n\n return \"Sawyer McLane\"", "def get_name(self):\n return self._player_name", "def get_name(self):\n \n # Return the player's name\n return self._name", "def getName(self):\r\n return self.name", "def getName(self):\n return self._get_name( )", "def get_name(self):\n\t\troot = self.get_xml()\n\t\treturn root.find(\"name\").text", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name" ]
[ "0.8474906", "0.8018867", "0.78476787", "0.77369803", "0.76065636", "0.75772613", "0.7530738", "0.750647", "0.74815065", "0.7397122", "0.736096", "0.7346389", "0.73411", "0.73411", "0.73411", "0.73353255", "0.7317605", "0.7317605", "0.7317605", "0.7317226", "0.73119605", "0.73007786", "0.72847605", "0.72582203", "0.72582203", "0.7252969", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.7239924", "0.72355515", "0.7200822", "0.7184562", "0.7182538", "0.7171499", "0.7170292", "0.7155951", "0.714764", "0.71427673", "0.71302074", "0.7126151", "0.7120488", "0.7120488", "0.711684", "0.71141535", "0.71141535", "0.7109963", "0.7108516", "0.7092654", "0.7092654", "0.7092654", "0.7092654", "0.7092654", "0.7092654", "0.7090733", "0.7082652", "0.7082652", "0.70806646", "0.7077761", "0.7069189", "0.7069111", "0.7069111", "0.7068182", "0.70403373", "0.70325744", "0.701813", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.70152307", "0.69950664", "0.69839007", "0.6982753", "0.6977834", "0.6951793", "0.69458944", "0.69454306", "0.69453615", "0.69453615", "0.69453615", "0.69453615" ]
0.0
-1
Get the surname of person
def surname(self): return self._surname
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def surname(self):\n return self.__surname", "def surname(self):\n if \"surname\" in self._prop_dict:\n return self._prop_dict[\"surname\"]\n else:\n return None", "def surname(self):\n if \"surname\" in self._prop_dict:\n return self._prop_dict[\"surname\"]\n else:\n return None", "def return_added_person_surname(self, person):\n self.is_this_page()\n self.try_get_choose_surname().click()\n self.try_get_input_group().clear()\n self.try_get_input_group().send_keys(person.surname_ukr)\n self.try_get_ok_button().click()\n return self.try_get_searched_surname(person.surname_ukr).text.partition(' ')[0]", "def last_name():\r\n\r\n return surnames()", "def get_last_name(people, individual):\n surname = people[individual]['NAME'].split()\n return surname[1]", "def second_name(self, instance):\r\n return instance.user.profile.second_name", "def get_seller_surname(self, id):\n try:\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n except Error as er:\n logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return surname", "def last_name():\n return dice.choice(names.lastname)", "def last_name(self, instance):\r\n return instance.user.last_name", "def get_last_name(self) -> str:\n return self.last_name", "def getLastname(self):\n return self.lastname", "def getLastName(self):\n return self.lastName", "def getLastName(self):\n return self.lastName", "def getLastName(self):\n return self.lastName", "def getLastName(self):\n return self.lastName", "def getLastName(self):\n return self.lastName", "def get_random_lastname_nameson ():\n name = db.get_database(DB_LAST_NAMESON).random_pop()\n if not name:\n return get_random_lastname_simple()\n if name.endswith('s'):\n if coinflip():\n return name\n else:\n return \"%son\" % name\n return \"%sson\" % name", "def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)", "def get_user_lastname():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n lastname = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('last_name', None), False)\n\n return lastname[0] if lastname else not_found('lastname')\n return None", "def ldap_get_lastname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n lastname = result.get(\"last-name\")[0]\n return lastname\n\n return None", "def lastname(self):\n return self._lastname", "def getLastName(self):\n\t\treturn self.LastName", "def male_middle_name():\r\n\r\n return male_first()", "def get_last_name(self):\n return self._last_name", "def searching_person_by_surname(self, given_surname):\n self.wait_until_page_generate()\n if len(self.rows_in_body) == 0:\n return None\n if self.is_element_present(self.SEARCHED_SURNAME):\n elem = self.driver.find_element(*self.SEARCHED_SURNAME)\n if elem.text.__contains__(given_surname):\n return elem\n else:\n return None", "def get_seller_name(self, id):\n try:\n MySQLConnector().execute_query('select name from salemen where id = {0};'.format(id))\n name = MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n MySQLConnector().execute_query('select surname from salemen where id = {0};'.format(id))\n surname = MySQLConnector().get_results()[0][0]\n name_surname = name +', ' + surname\n except Error as er:\n 
logging.getLogger(__name__).error(\"Something went wrong with database %s\" % er)\n return name_surname", "def person_name(self):\n return self._person_name", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def get_patient_name(patient_bundle):\n names = patient_bundle['name']\n for name in names:\n if (name['use'] == 'official'):\n str = ''\n for g in name['given']:\n str += g + ' '\n str += name['family']\n # 'Rita460 Schowalter414'??\n return str", "def surnames():\r\n\r\n cursor.execute('SELECT * FROM surname order by RANDOM() limit 1')\r\n return cursor.fetchone()[0]", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n return self.name+self.last_name", "def last_name(self) -> str:\n return self._last_name", "def last_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_name\")", "def last_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_name\")", "def last_name(self):\n return self._last_name", "def last_name(self):\n return self._last_name", "def last_name(self):\n return self._last_name", "def last_name(self):\n return self._last_name", "def get_author_name(author_node):\n surname = author_node.xpath(\"string(./surname[1])\").extract_first()\n given_names = author_node.xpath(\"string(./given-name[1])\").extract_first()\n suffix = author_node.xpath(\"string(.//suffix[1])\").extract_first()\n author_name = \", \".join(el for el in (surname, given_names, suffix) if el)\n\n return author_name", "def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name", "def first_last_name(obj):\n return '%s %s' % (obj.first_name, obj.last_name)", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def ldap_get_fullname(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n fullname = (result.get(\"first-name\")[0], result.get(\"last-name\")[0])\n return ' '.join(str(name) for name in fullname)\n\n return None", "def get_user_firstname_lastname(self, record):\n lower_first_name, lower_last_name = self.clean_user_names(record)\n\n #No first name and last name check email\n if lower_first_name is None and lower_last_name is None:\n\n lower_first_name, lower_last_name = \\\n self.extract_name_from_email(record)\n\n return lower_first_name, lower_last_name", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def nombre(self):\n return self.persona.nombre", "def get_full_name(self):\n return self.last_name + self.first_name", "def get_last_name(self):\n element = self.driver.find_element(*self.lastname_textbox_selector)\n return element.get_attribute(\"value\")", "def persona_name(self) -> str:\n return self._persona_name", "def get_random_lastname_family ():\n if one_chance_in(3):\n return get_random_lastname_irish ()\n elif coinflip():\n return get_random_lastname_scottish ()\n else:\n return get_random_lastname_nameson()", "def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()", "def get_random_lastname_simple ():\n return db_random_pop_default(DB_LAST_SIMPLE, \"Doe\")", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def get_full_name(self):\r\n full_name = '%s %s' % 
(self.first_name, self.last_name)\r\n return full_name.strip()", "def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()", "def last_name(self):\n\n return self._last_name", "def LastName(self, reg_last_name = VALUE_NOT_SET):\n if reg_last_name != VALUE_NOT_SET:\n self.__reg_last_name = reg_last_name\n try:\n self.__contact.AdditionalInfo().LastName(self.__reg_last_name)\n except:\n pass\n else:\n if not self.__reg_last_name:\n self.__reg_last_name = None\n return self.__reg_last_name", "def get_salutation(email):\n return email.split(\"@\")[0].replace(\".\", \" \").title()", "def get_name():\n return \"Boss\"", "def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)", "def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')", "def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()", "def getFirstName(self):\r\n return self.firstName", "def get_full_name(self):\n return self.first_name + ' ' + self.last_name", "def change_surname(change_account):\n change_data(change_account, changed_data='surname')", "def get_full_name(self):\n\t\treturn self.email", "def get_random_lastname_irish ():\n name = db.get_database(DB_LAST_GAELIC1).random_pop()\n if not name:\n return get_random_lastname_simple()\n return \"O'%s\" % name", "def get_random_lastname_scottish ():\n name = db.get_database(DB_LAST_GAELIC2).random_pop()\n if not name:\n return get_random_lastname_simple()\n return \"%s%s\" % (random.choice(('Mc', 'Mac')), name)", "def fullname(self):\n parts = []\n self.lastname and parts.append(self.lastname)\n self.firstname and parts.append(self.firstname)\n len(parts) == 0 and parts.append(self.userid)\n\n return ' '.join(parts)", "def surname(self, surname):\n\n self._surname = surname", "def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()", "def get_full_name(self):\n return self.first_name+\" \"+self.last_name", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def name_lookup(first_name):\n if first_name == \"Joe\": \n last_name = \"Warren\"\n elif first_name == \"Scott\": \n last_name = \"Rixner\"\n elif first_name == \"John\": \n last_name = \"Greiner\"\n elif first_name == \"Stephen\":\n last_name = \"Wong\"\n else: \n last_name = \"Error: Not an instructor\"\n return last_name", "def get_user_fullname(self):\n member = self.get_user()\n if member:\n return member.getProperty('fullname')", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n 
return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()", "def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()" ]
[ "0.84594584", "0.8456766", "0.8456766", "0.7657546", "0.75671947", "0.7503995", "0.71551466", "0.7144786", "0.6926863", "0.68105274", "0.67284966", "0.66660845", "0.6651198", "0.6651198", "0.6651198", "0.6651198", "0.6651198", "0.66335547", "0.6632711", "0.65882415", "0.6583125", "0.6572899", "0.65433663", "0.6542136", "0.6538844", "0.65300226", "0.64923894", "0.6465234", "0.6463275", "0.6421242", "0.6410438", "0.6393426", "0.6382503", "0.6371339", "0.63670486", "0.63670486", "0.63669175", "0.63669175", "0.63669175", "0.63669175", "0.6327212", "0.63264155", "0.6324255", "0.6322742", "0.63141817", "0.6306357", "0.62985474", "0.62595093", "0.62559104", "0.62505275", "0.62427616", "0.62330645", "0.6217193", "0.620231", "0.6195343", "0.6181588", "0.6181588", "0.61742806", "0.6167332", "0.61640996", "0.6154568", "0.61515564", "0.6128411", "0.61205983", "0.61198986", "0.6118775", "0.6114568", "0.61099654", "0.6100268", "0.6074036", "0.6071181", "0.6070612", "0.606844", "0.60621196", "0.6046512", "0.60434324", "0.604168", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983", "0.60323983" ]
0.8435841
4
Get the phone of person
def phone(self): return self._phone
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPhone(self):\n return self.phone", "def phone(self) -> str:\n return pulumi.get(self, \"phone\")", "def phone(self):\n return self._phone", "def phone(self):\n return self._phone", "def phone(self):\n return self._phone", "def personal_phone(self):\n return self._personal_phone", "def person_phone_number(self):\n return self._person_phone_number", "async def get_phone(self):\n\n e = await self.request.request(url='https://accountinformation.roblox.com/v1/phone', method='get')\n return e", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")", "def telephone(self):\n return self._telephone", "def phone(self):\n raise NotImplementedError()", "def phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"phone_number\")", "def telephone(self) -> str:\n return self._telephone", "def get_user_phone(cls, userid):\n\n user = User.query.filter_by(user_id=userid).one()\n\n user_phone = user.mobile_phone\n\n return user_phone", "def number(self):\n return str(self._phone)", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")", "def phone_primary(self, instance):\r\n return instance.user.profile.phone_primary", "def phone_mobile(self, instance):\r\n return instance.user.profile.phone_mobile", "def phone_number(self):\n\n return self._phone_number", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def mobile_phone(self):\n if \"mobilePhone\" in self._prop_dict:\n return self._prop_dict[\"mobilePhone\"]\n else:\n return None", "def get_phone(self, list_item):\n phone = list_item.find('span', {'class': 'biz-phone'})\n return phone.get_text().strip()", "def phone_number_detail(self):\n return self._phone_number_detail", "def ad_rep_lead_phone(obj):\n if obj.phone_number is None:\n phone_number = ''\n else:\n phone_number = format_phone(obj.phone_number)\n return \"%s\" % phone_number", "def extract_phone(self, response):\n\n telephones = response.xpath('//*/a[contains(@href,\"tel:\")]/text()').extract()\n if telephones:\n telephones = [phone.replace(\"Tel:\", \"\").replace(\"Phone:\", \"\").replace(\"Handynummer:\", \"\").strip() for phone\n in telephones if phone]\n return \",\".join(filter(None, telephones)).strip()\n else:\n telephones = response.xpath('//*/p[contains(text(),\"nummer\")]/text()').extract()\n if telephones:\n telephones = [phone.replace(\"Tel:\", \"\").replace(\"Phone:\", \"\").replace(\"Handynummer:\", \"\").strip() for\n phone in telephones if (not \"Firmanummer\" in phone)]\n return \",\".join(filter(None, telephones)).strip()\n return \"\"", "def get_phone_number(user_id):\n try:\n student = _UserProfile.objects.get(user_id=user_id)\n except _UserProfile.DoesNotExist as exception:\n log.exception(exception)\n return None\n return student.phone_number or None", "def get_phone_number():\r\n phone_number = input(\"What is the customer's phone number?: \")\r\n\r\n return phone_number", "def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone", "def ldap_get_number(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n number = result.get(\"mobile-phone\")[0]\n return number\n\n return None", "def 
get_phone(self, node, *, area_codes=[], error=True):\n\n if isinstance(node, etree._ElementUnicodeResult):\n match = re.search(\n r\"(?:\\A|\\D)(\\(?\\d{3}\\)?\\D?\\d{3}\\D?\\d{4}(?:\\s*(?:/|x|ext[.:]?|poste)[\\s-]?\\d+)?)(?:\\D|\\Z)\", node\n )\n if match:\n return match.group(1)\n match = node.xpath('.//a[contains(@href,\"tel:\")]')\n if match:\n return match[0].attrib[\"href\"].replace(\"tel:\", \"\")\n if area_codes:\n for area_code in area_codes:\n match = re.search(\n r\"(?:\\A|\\D)(\\(?%d\\)?\\D?\\d{3}\\D?\\d{4}(?:\\s*(?:/|x|ext[.:]?|poste)[\\s-]?\\d+)?)(?:\\D|\\Z)\" % area_code,\n node.text_content(),\n )\n if match:\n return match.group(1)\n else:\n match = re.search(\n r\"(?:\\A|\\D)(\\(?\\d{3}\\)?\\D?\\d{3}\\D?\\d{4}(?:\\s*(?:/|x|ext[.:]?|poste)[\\s-]?\\d+)?)(?:\\D|\\Z)\",\n node.text_content(),\n )\n if match:\n return match.group(1)\n if error:\n raise Exception(\"No phone pattern in {}\".format(node.text_content()))", "def harvest_by_phone(client, phone):\n try:\n entity = client(users.GetFullUserRequest(id=phone))\n except ValueError:\n return 'There is no account connected to this phone number'\n\n return harvest_user(client, entity)", "def phone(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.PHONE_INPUT)\n\t\treturn element.element_value", "def phone(raw_phone):\n\n phone = raw_phone.replace('+33', '0')\n phone = '{} {} {} {} {}'.format(\n phone[0:2],\n phone[2:4],\n phone[4:6],\n phone[6:8],\n phone[8:10])\n return phone", "def is_phone(self):\n return self.name in PHONES", "def parse_phone(phone):\n if isinstance(phone, int):\n return str(phone)\n else:\n phone = re.sub(r'[+()\\s-]', '', str(phone))\n if phone.isdigit():\n return phone", "def getPhoneUuid(phoneName):\n return searchForPhone(phoneName)['uuid']", "def lookup_phone(ikey, skey, host, phone):\n response = client.call_json_api(\n ikey, skey, host, 'GET', '/verify/v1/lookup/phone.json',\n phone=[phone])\n return response", "def lookup_phone_number(phone):\n \n #create Twilio client\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n try:\n\n #check if number is real number using Twilio lookup\n phone_number = client.lookups \\\n .phone_numbers(phone) \\\n .fetch(type=['carrier'])\n\n #returns formmatted phone number\n return phone_number.phone_number\n\n #checks Twilio exception responses if number not real\n except TwilioRestException as e:\n\n #Number not found - return False\n if e.code == 20404:\n\n return False\n\n else:\n\n raise e", "def type_phone(self, phone):\n\n\t\twith allure.step(\"Type payee phone\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.PHONE_INPUT)\n\t\t\telement.write(phone)\n\t\t\treturn None", "def get_all_phone_numbers(self):\r\n return [person.phone_number for person in self.__person_repository.elements]", "def get_user_by_phone(self, phone):\n sql = 'select id ,first_name' \\\n ',last_name' \\\n ',password' \\\n ',phone ' \\\n 'from account_user ' \\\n 'where phone = %s'\n user = User.objects.raw(sql, [phone])[0];\n return user", "def phone_number():\r\n\r\n x = ''.join(str(_random.randrange(0, 10)) for i in xrange(10))\r\n y = '%s-%s-%s' % (x[0:3], x[3:6], x[6:])\r\n return y", "def business_phone(self):\n return self._business_phone", "def phonecall():\n phone_number = choice(phone_numbers)\n r = twiml.Response()\n r.dial(phone_number)\n return str(r)", "def telephone(value, arg=None):\n \n # Normalise a number\n 
value = value.replace(\" \", \"\").replace(\"-\", \"\")\n if value.startswith(\"0\"):\n value = \"+44\" + value[1:]\n normalised = value\n \n # Check if it's a number which is formatted in a special way\n if normalised in UNUSUAL_NUMBERS:\n value = UNUSUAL_NUMBERS[normalised]\n else:\n # Figure out how to format that number\n \n # Convert UK numbers into national format\n if value.startswith(\"+44\"):\n value = \"0\" + value[3:]\n \n # Now apply rules on how to split up area codes\n if value[:8] in ('01332050', '01382006'):\n # Direct dial only\n value = value[:5] + \" \" + value[5:]\n elif value[:7] in ('0141005', '0117101') or value[:6] in ('011800',):\n # Direct dial only\n value = value[:4] + \" \" + value[4:7] + \" \" + value[7:]\n elif value[:7] in ('0200003',):\n # Direct dial only\n value = value[:3] + \" \" + value[3:7] + \" \" + value[7:]\n elif value.startswith('01'):\n if value[2] == '1' or value[3] == '1':\n # 4 digit area codes\n area_code = value[:4]\n local_part = value[4:7] + \" \" + value[7:]\n elif value[:6] in (\n '013873', # Langholm\n '015242', # Hornby\n '015394', # Hawkshead\n '015395', # Grange-over-Sands\n '015396', # Sedbergh\n '016973', # Wigton\n '016974', # Raughton Head\n '016977', # Brampton\n '017683', # Appleby\n '017684', # Pooley Bridge\n '017687', # Keswick\n '019467', # Gosforth\n ):\n # 6 digit area codes\n area_code = value[:4] + \" \" + value[4:6]\n local_part = value[6:]\n else:\n # 5 digit\n area_code = value[:5]\n local_part = value[5:]\n \n value = \"(%s) %s\" % (area_code, local_part)\n \n elif value.startswith('02'):\n # 3 digit area codes\n value = \"(%s) %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('0500') or value.startswith('0800'):\n # direct dial - 4 digit prefix, short following\n value = \"%s %s\" % (value[:4], value[4:])\n \n elif value.startswith('03') or value.startswith('08') or value.startswith('09'):\n # direct dial - 4 digit prefix\n value = \"%s %s %s\" % (value[:4], value[4:7], value[7:])\n \n elif value.startswith('05') or value.startswith('070'):\n # direct dial - 3 digit prefix\n value = \"%s %s %s\" % (value[:3], value[3:7], value[7:])\n \n elif value.startswith('07'):\n # direct dial - 5 digit prefix, short following\n value = \"%s %s\" % (value[:5], value[5:])\n\n # Now apply University rules:\n if value[:10] in ('(01865) 27', '(01865) 28', '(01865) 43', '(01865) 61'):\n # Oxford - list of internal number prefixes here:\n # http://www.oucs.ox.ac.uk/telecom/directories/intdiraccess.xml\n value = \"(01865 \" + value[8] + \")\" + value[9:]\n\n if arg == 'nolink':\n return value\n else:\n return mark_safe('<a href=\"tel:%s\">%s</a>' % (normalised, value))", "def searchForPhone(phoneName, attributes=['uuid']):\n returnedTagsForApi = {}\n for attr in attributes:\n returnedTagsForApi[attr] = True\n phoneJson = client.service\\\n .listPhone(searchCriteria={'name': '%s%s' % ('CSF', phoneName)}, returnedTags=returnedTagsForApi)\n if (not phoneJson['return'] or not phoneJson['return']['phone']):\n return None\n if (len(phoneJson['return']['phone']) > 1):\n raise RuntimeError('found more then one phone with the same name, the name => ' + phoneName)\n return phoneJson['return']['phone'][0]", "def support_phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"support_phone_number\")", "def check_format_user_phone(phone):\n match = re.match(r'^\\+[0-9]{10,}$', phone)\n if not match:\n raise exceptions.ValidationError('phone is not valid!')\n return phone", "def validate_phone(self, 
data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(phone=value).exists():\n raise serializers.ValidationError('phone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def get_phone_number(phone_number):\n return input(\"Enter Updated Phone Number for {phone}: \".format(phone=phone_number))", "def setPhone(self,phone):\r\n self.phone = phone", "def test_get_phone(self, raw, expected):\n\n field_mapper = FieldMapper(Row([raw], ['Mobile_Phone']))\n actual = field_mapper.get_phone()\n self.assertEqual(expected, actual)", "def personal_phone(self, personal_phone):\n\n self._personal_phone = personal_phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def __init__(self, phone):\r\n self.phone = phone", "def phone(self, phone):\n if self.local_vars_configuration.client_side_validation and phone is None: # noqa: E501\n raise ValueError(\"Invalid value for `phone`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n phone is not None and len(phone) > 16):\n raise ValueError(\"Invalid value for `phone`, length must be less than or equal to `16`\") # noqa: E501\n\n self._phone = phone", "def get_phone_asset(self, asset_id):\n return self.get_asset(asset_id, 'PHONE')", "def phone(self, phone):\n if self.local_vars_configuration.client_side_validation and phone is None: # noqa: E501\n raise ValueError(\"Invalid value for `phone`, must not be `None`\") # noqa: E501\n\n self._phone = phone", "def number(self, new_phone):\n returned_num = self.get_valid_num(new_phone)\n if returned_num is None:\n raise ValueError\n self._phone = returned_num", "def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num)\n return user\n \n # User.query.filter(User.phone_num == phone_num).one()", "def phone_numbers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]]:\n return pulumi.get(self, \"phone_numbers\")", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def phone(self, phone):\n\n self._phone = phone", "def test_get_a_common_area_phone(self):\n pass", "def test_good_phone():\n good_phone = \"213-555-1212\"\n m = CannedRe.PHONE.match(good_phone)\n# print getmembers(m)\n assert m is not None, \"Canned RegEx phone test failed for %s\" % good_phone\n assert m.string == good_phone", "def setPhone(self, phone):\n self.phone = phone\n return self", "def generate_random_phone():\n first = str(random.randint(100, 999))\n second = str(random.randint(1, 888)).zfill(3)\n last = (str(random.randint(1, 9998)).zfill(4))\n while last in ['1111', '2222', '3333', '4444', '5555', '6666', '7777', '8888']:\n last = (str(random.randint(1, 9998)).zfill(4))\n return '{}-{}-{}'.format(first, second, last)", "def phone(self, new_number):\n self._phone.number = new_number", "def support_phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_phone_number\")", "def support_phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"support_phone_number\")", "def phone_number_organizer(self, key):\n\t\ttry:\n\t\t\tphone_number = 
key[u'phone']\n\t\t\tformat_number = '(' + phone_number[0:3] + ') ' + phone_number[3:6] + '-' + phone_number[6:]\n\t\t\treturn format_number\n\t\texcept KeyError:\n\t\t\tprint [u'name'], \"requires manual phone number verification.\"\n\t\t\treturn \"Manual Input\"", "def is_phone(elem):\n return elem.attrib['k'] == 'phone'", "def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num).first()\n return user\n \n # SELECT * FROM users WHERE phone_num == phone_num\n # User.query.filter(User.phone_num == phone_num).one()", "def create_phone_number(n):", "def clean_phone(self):\n phone = self.cleaned_data.get('phone')\n if Profile.objects.filter(phone=phone). \\\n exclude(pk=self.instance.pk).exists():\n raise ValidationError(\n u'This phone is already registered.',\n code='invalid'\n )\n\n return phone", "def get(self, phone):\n\n #args = argParser()\n #phone = args.parse_args().get(\"fromPhone\")\n\n if not UserExist(phone):\n return jsonify(generateReturnDictionary(301, \"Sorry, Mobile Wallet Account does not exists!, create an account.\", \"FAILURE\"))\n\n try:\n retJson = mongo.db.Register.find({\n \"Phone\": phone\n }, {\n \"Password\":0, # projection\n \"_id\":0,\n \"FirstName\":0,\n \"LastName\":0,\n \"Email\":0,\n \"Phone\":0,\n \"Network\":0,\n \"Username\":0,\n \"Password\":0,\n \"Debt\":0,\n \"DateTimeCreated\":0,\n \"apiKeys\":0\n })[0]\n return make_response(jsonify(retJson), 200)\n except Exception as e:\n retJson = {\n \"code\": 409,\n \"message\": \"There was an error while trying to check your wallect balance -> , try again!\",\n \"status\": \"FAILURE: {0}\".format(e.message)\n }\n return jsonify(retJson)", "def validate_telephone(self, data):\n value = data.strip()\n if re.match(constant.NUMBER_ONLY, value):\n if User.objects.filter(telephone=value).exists():\n raise serializers.ValidationError('telephone number already registered')\n return value\n raise serializers.ValidationError(VALIDATION['phone']['invalid'])", "def search_by_phone_number(self, phone_number):\r\n if len(re.findall(\"[^0-9-+ ]+\", phone_number)) or len([c for c in phone_number if c == '+']) > 1:\r\n raise PersonPhoneNumberException(\"Invalid phone number search input. Can only contain digits, hyphens,\"\r\n \"spaces, and a plus sign(+).\")\r\n phone_number = phone_number.replace(' ', '')\r\n phone_number = phone_number.replace('-', '')\r\n phone_number = phone_number.replace('+4', '')\r\n return self.__filter(self.get_all_persons(), lambda x: phone_number in x.phone_number.replace(' ', ''))", "def read_phone_contacts(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT c.personid, c.contactid, p.typeid, p.sequenceno, p.areacode, p.exchange, p.trunk, \"\n \"co.typecode, co.typedescription \"\n \"FROM contact AS c \"\n \"JOIN phone AS p ON c.contactid = p.contactid \"\n \"JOIN codes co on co.typeid = p.typeid \"\n \"WHERE c.personid = ? 
ORDER BY p.sequenceno ASC;\", (person_id,))\n\n phone_list = []\n for row in c:\n _phone = Phone()\n _phone.person_id = row[\"personid\"]\n _phone.contact_id = row[\"contactid\"]\n _phone.phone_type_id = row[\"typeid\"]\n _phone.sequence_number = row[\"sequenceno\"]\n _phone.area_code = row[\"areacode\"]\n _phone.exchange = row[\"exchange\"]\n _phone.trunk = row[\"trunk\"]\n _phone.type_code = row[\"typecode\"]\n _phone.phone_type_id = row[\"typeid\"]\n _phone.type_description = row[\"typedescription\"]\n phone_list.append(_phone)\n conn.close()\n return phone_list\n except:\n return []", "def _query_user(phone):\n if not is_valid_phone_number(phone):\n return None\n\n try:\n user = Profile.objects.get(mobile_phone=_remove_area_code(phone)).user\n\n return {\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'groups': [group.name if group.name[0] != '_' else\n group.name[1:] for group in user.groups.all()]\n }\n except (ObjectDoesNotExist, MultipleObjectsReturned):\n # Expected output for a lot of calls. Not an error.\n return None", "def _format_caller(call_user, phone):\n # The phone number is private or not provided\n if not phone:\n return 'dolt nummer'\n\n if is_valid_phone_number(phone):\n # Set the phone number as a clickable link\n caller = '<tel:%s|%s>' % (phone, phone)\n else:\n caller = phone\n\n if call_user is not None:\n caller = '%s %s (%s)' % (\n call_user['first_name'],\n call_user['last_name'],\n caller\n )\n\n return caller", "def get(self, phone_number: str):\r\n args = authParser.parse_args()\r\n\r\n first_three = phone_number[:3]\r\n\r\n if first_three not in prefix_list and first_three != \"+23\":\r\n response = {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Input in a valid phone-number\"\r\n }\r\n }\r\n return response, http.client.BAD_REQUEST\r\n\r\n if len(phone_number) == 11 or len(phone_number) == 14:\r\n user = (UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first())\r\n\r\n if not user:\r\n response = {\r\n \"status\": \"error\",\r\n \"detials\": {\r\n \"message\": \"User with phone number doesnt exist\"\r\n }\r\n }\r\n return response, http.client.NOT_FOUND\r\n\r\n user = UserModel.query.filter(\r\n UserModel.phone_number == phone_number).first()\r\n\r\n if not user:\r\n # The email doesnt exist\r\n return {\r\n \"status\": \"error\",\r\n \"details\": {\r\n \"message\": \"Not Found\"\r\n }\r\n }, http.client.OK\r\n user = admin_namespace.marshal(user, user_model)\r\n return {\r\n \"status\": \"success\",\r\n \"details\": {\r\n \"result\": user\r\n }\r\n }, http.client.OK", "def find_partner_from_phone_number(self, cr, uid, phone_number, context=None):\n _logger.debug('Phone number: %s' % phone_number)\n if context is None:\n context = self.pool.get('res.users').context_get(cr, uid, context=context)\n\n search_args = [\n '|',\n ('phone', '=', phone_number),\n ('mobile', '=', phone_number),\n ]\n address_obj = self.pool.get('res.partner.address')\n address_ids = address_obj.search(cr, uid, search_args, context=context)\n if not address_ids:\n return False, False\n\n address_id = address_ids[0]\n partner_id = address_obj.browse(cr, uid, address_id, context=context).partner_id\n partner_id = partner_id and partner_id.id or False\n\n return partner_id, address_id", "def phone_extension(self) -> Optional[str]:\n return pulumi.get(self, \"phone_extension\")", "def validate_phone(form, field):\n if len(field.data) > 16:\n raise ValidationError('Invalid phone number')\n try:\n input_number = 
phonenumbers.parse(field.data)\n if not (phonenumbers.is_valid_number(input_number)):\n raise ValidationError('Invalid phone number')\n except Exception:\n input_number = phonenumbers.parse('+1' + field.data)\n if not (phonenumbers.is_valid_number(input_number)):\n raise ValidationError('Invalid phone number')", "def __str__(self):\n phone_string = \"(\"\n first_three_digits = \"\"\n next_three_digits = \"\"\n last_four_digits = \"\"\n for i in range(self.NUM_OF_DIGITS):\n if i <= 2:\n first_three_digits += self._phone[i]\n elif 3 <= i <= 5:\n next_three_digits += self._phone[i]\n else:\n last_four_digits += self._phone[i]\n phone_string += first_three_digits + \") \" + \\\n next_three_digits + \"-\" + last_four_digits\n return phone_string", "def __str__(self):\n return self.mobile", "def get_valid_num(cls, phone_number):\n if type(phone_number) != str:\n return None\n elif cls.MAX_NUM_LEN < len(phone_number):\n return None\n else:\n extracted_num = cls.extract_digits(phone_number)\n if len(extracted_num) != cls.NUM_OF_DIGITS:\n return None\n return extracted_num", "def is_phone_number(elem):\n return (elem.attrib['k'] == \"phone\")", "def telephone(self, telephone: str):\n\n self._telephone = telephone", "def reg_phone(str_phone:str) -> object:\r\n\r\n [ind, nph]=str_phone.strip(\"+\").split(\" \")\r\n #Cut off the local 0\r\n #Create regexes for 3 cases : with IND and without 0, without IND and with 0, without IND and 0\r\n formats=[\\\r\n \"(?P<ind>{})? ?0?(?P<num>{})\".format(ind, ' ?'.join(list(nph.rstrip('0'))))\r\n ]\r\n return re.compile(f'({\"|\".join(formats)})')", "def normalize(phone):\n d = re.sub('\\D', '', phone)\n return '+7 (%s) %s-%s-%s' % (d[1:4], d[4:7], d[7:9], d[9:11])" ]
[ "0.7979565", "0.7934985", "0.7780116", "0.7780116", "0.7780116", "0.7700906", "0.76863", "0.75520843", "0.7512064", "0.7512064", "0.7222818", "0.7215174", "0.7091844", "0.70915896", "0.7069842", "0.69753623", "0.6879566", "0.6879566", "0.6869756", "0.681976", "0.6818524", "0.6798246", "0.6798246", "0.6784857", "0.6739928", "0.67312413", "0.67222345", "0.66140586", "0.65890074", "0.6468029", "0.6436578", "0.62673587", "0.6238302", "0.62362593", "0.62273985", "0.622442", "0.6207423", "0.6200947", "0.6188729", "0.6177947", "0.61719716", "0.6146174", "0.6135858", "0.61089236", "0.60690284", "0.6063358", "0.60231066", "0.5992381", "0.59221417", "0.58694625", "0.58292985", "0.58156717", "0.57810897", "0.57764024", "0.5765437", "0.57609373", "0.57609373", "0.57609373", "0.57609373", "0.57609373", "0.57602227", "0.5739336", "0.5738792", "0.57377464", "0.5735004", "0.57321197", "0.573017", "0.573017", "0.573017", "0.573017", "0.573017", "0.57190615", "0.5717693", "0.57163817", "0.57145315", "0.5707914", "0.5706341", "0.5706341", "0.57011247", "0.56980044", "0.56694674", "0.5667325", "0.5663184", "0.5630293", "0.5625368", "0.5622128", "0.5595772", "0.55851334", "0.5583994", "0.5569377", "0.5531439", "0.5528167", "0.55125177", "0.55081284", "0.55020547", "0.54976815", "0.54911655", "0.54800904", "0.5457417", "0.54444736" ]
0.7533967
8
Get the address of person
def address(self): return self._address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAddress(user):", "def get_address(self):\n if self.get_entity: # needs an entity to work\n if self.building:\n address = self.get_entity.get_institutional_address()\n address.extend(self.building.get_postal_address())\n return address\n else:\n return self.get_entity.get_address()", "def get_address(self, ):\n return self.get_parameter('address')", "def address(self, name):\n return self.query(name).response.answer[0].items[0].address", "def get_address(self):\n return self.address.line[0]+\", \"+self.address.city+\", \"+self.address.state+\", \"+self.address.country", "def get_address(self):\n if self.address:\n return self.address", "def get_address(self):\n entity = self\n if entity.abstract_entity:\n entity = self.get_real_ancestor()\n if entity:\n address = entity.get_institutional_address()\n building = entity.get_building()\n if building:\n if entity.building_recapitulates_entity_name: \n address.extend(building.get_postal_address()[1:])\n else:\n address.extend(building.get_postal_address())\n return address", "def address(self):\n return self.data.get('address')", "def nomad_address():\n\n print(nomad.get_address())", "def address1(self, instance):\r\n return instance.user.profile.address1", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self):\n return str(self.street) + str(self.city) + str(self.state) + str(self.zipcode)", "def _get_address(self, soup):\n street, city, state, zipcode = None, None, None, None\n try:\n # property detail tag\n street = soup.find('div', class_='main-address').get_text().strip()\n # find address tag\n address = soup.find('div', class_='c-address')\n \n # pattern for the address in this website\n locality = address.find_all('span', class_='locality')\n city = locality[0].get_text().strip()\n if len(locality) > 1:\n city = locality[1].get_text().strip()\n state = address.find('span', class_='region').get_text().strip()\n zipcode = address.find('span', class_='postal-code').get_text().strip()\n return street, city, state, zipcode\n except:\n return street, city, state, zipcode", "def _get_address(self):\n return self.__address", "def get_address(self) -> Optional[str]:\n raise NotImplementedError()", "def get_address(self, address=None):\n return self.__get_addr_grp('address', address)", "def get_address(self):\n\n return \"{}\\n{}\\n{},\\n{},\\n{}\".format(\n self.address_line_1, self.city, self.state, self.postal_code, self.country\n )", "def get_address(query):\n address = \"Dis-moi, quel endroit tu cherches ?\"\n data = get_data(query)\n try:\n address_data = data[\"results\"][0][\"formatted_address\"]\n address = (\"Si je ne me trompe pas,\"\n \" l'adresse que tu cherche, c'est ... \" + address_data + \". 
Sinon\"\n \", dis-moi le nom de lieu exact\")\n except IndexError:\n address = \"Désolé, je n'ai pas compris quel endroit tu cherches ?\"\n finally:\n return address", "def do_getaddress(self,args):\n ppdict(bitstamp.get_depositaddress())", "def address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"address\")", "def get_postal_address(self):\n # print \"getting postal address\"\n address = []\n if self.name:\n address.append(self.name)\n if self.number:\n address.append(self.number + \" \" + self.street) # because building numbers and street names go on the same line\n elif self.street:\n address.append(self.street)\n if self.additional_street_address:\n address.append(self.additional_street_address)\n if self.site.post_town:\n address.append(self.site.post_town + \" \" + self.postcode)\n elif self.postcode:\n address.append(self.postcode)\n return address", "def getReferencedAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...", "def getAddress(self) -> int:\n ...", "def address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address\")", "def address_details(self) -> 'outputs.AddressDetailsResponse':\n return pulumi.get(self, \"address_details\")", "def address2(self, instance):\r\n return instance.user.profile.address2", "def address(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"address\")", "def address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"address\")", "def get_apartment_address(self, soup, apartment_dict):\n\n info_class = soup.find_all('div', {'class': 'info'})\n if info_class and len(info_class) > 0:\n info_class = info_class[0]\n address = info_class.find('h2').text.strip()\n\n from parse import parse\n address = parse(\"Location: {}\", address)[0]\n apartment_dict['address'] = address\n else:\n logging.warning(\"Failed to parse apartment address\")\n return", "def address1(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address1\")", "def address_str(self):\n return self._plrevgeoloc.addressString", "def test_get_address(self):\r\n note_data = self.tape.dataframe.iloc[0]\r\n note = self.tape._get_note_object(note_data)\r\n eq_(note.get_address(), '8 Brown St, Methuen, MA 01844')", "def get_address(self):\n \n return self._addr", "def get_address(self):\n \n return self._addr", "def _get_address(self, jdict):\n \n try:\n # access the location info dictionary\n loc_dict = jdict['props']['homeDetails']['location']\n state = loc_dict['stateCode']\n city = loc_dict['city']\n zipcode = loc_dict['zipCode']\n street = loc_dict['formattedLocation']\n return street, city, state, zipcode\n except:\n return None, None, None, None", "def _get_address(self, soup):\n\n try:\n # from the content tag, extract the tag that contains all the address info\n address_tag = soup.find('div', class_='flex flex-col md:flex-row')\n # street tag\n street_tag = address_tag.find('h1', class_='h3')\n # street information\n street = street_tag.get_text()\\\n .strip()\\\n .replace(',', '')\n # region tag \n region_tag = address_tag.find('h5', class_='listing-card-location') \\\n .get_text() \\\n .strip() \\\n .split(' ')\n # city information\n city = region_tag[0].replace(',', '').title()\n # state information\n state = region_tag[1]\n # zipcode information\n zipcode = region_tag[2]\n\n return street, city, state, zipcode\n \n except:\n # return None if any of the above parts 
failed\n # if there's any part that's missing in the address part,\n # the whole address becomes useless\n return None, None, None, None", "def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)", "def getPCAdress(self) -> ghidra.program.model.address.Address:\n ...", "def __str__(self):\n return self.address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def address(self):\n return self._address", "def get_address():\r\n address = input(\"What is the customer's address?: \")\r\n\r\n return address", "def address(self) -> object:\n return self._address", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def address1(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address1\")", "def address(self):\n out = {'zip_code': '',\n 'city': '',\n 'street': '',\n 'phone': ''}\n if self.user.contract_member.exists():\n last_contract = self.user.contract_member.last()\n out['zip_code'] = last_contract.zip_code\n out['city'] = last_contract.city\n out['street'] = last_contract.street\n out['phone'] = last_contract.phone\n\n return out", "def get_address(self):\n \n if \"'\" in self.data.get(\"AddressInfo\").get(\"AddressLine1\") :\n self.data.get(\"AddressInfo\").get(\"AddressLine1\").replace(\"'\",\"\")\n\n return self.data.get(\"AddressInfo\").get(\"AddressLine1\")", "def get_party_address_by_id(party_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from party where name = '{}';\".format(party_id)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def Address(self) -> _n_5_t_0:", "def show_fresh_address(self):\n\t\treturn self.__fresh_account()[\"address\"]", "def getAddressAtIndex(self, index: int) -> ghidra.program.model.address.Address:\n ...", "def street_address1(self) -> str:\n return pulumi.get(self, \"street_address1\")", "def fetch_address(cpr: str) -> str:\n\n return \"Åbogade 15, 8200 Aarhus N\"", "def get_address(saved_home_id):\n\n sql = \"SELECT address FROM saved_homes WHERE saved_home_id= :saved_home_id\"\n\n cursor = db.session.execute(sql,{\"saved_home_id\": saved_home_id})\n\n old_address = cursor.fetchone()\n \n address = \" \".join(old_address)\n\n return address", "def __str__(self):\n return format_address(**self._get_elements())", "def deposit_address(self):\n response = self.query('deposit_address')\n return response", "def _get_address(self):\n return utf82unicode(pn_terminus_get_address(self._impl))", "def street_address(self):\n\t\tif self.address2:\n\t\t\treturn '{}, {}'.format(self.address, self.address2)\n\t\treturn self.address", "def address(self) -> str:\n return self._address", "def address(self) -> str:\n return self._address", "def getAbsoluteAddress(program: ghidra.program.model.listing.Program, address: ghidra.program.model.address.Address) -> ghidra.program.model.address.Address:\n ...", "def get_address(self, list_item):\n Address = namedtuple('Address', ['addr', 'city', 'state', 'zip'])\n extract = [text for text in list_item.find('address').stripped_strings]\n\n # Sometimes a street address is not given\n if len(extract) == 1:\n addr, rest = None, extract[0]\n else:\n addr, rest = extract\n\n city, rest = rest.split(',')\n state, zip = rest.strip().split(' ')\n return Address(addr, city, state, zip)", "def get_address(post_content):\n 
post_address = post_content.find(\"div\", {\"class\": \"mapaddress\"})\n address_attr = {\"address\": \"\"}\n if post_address is not None:\n address_attr[\"address\"] = post_address.text\n return address_attr", "def address2(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address2\")", "def address(self):\n return str(self._address)", "def address(self):\n return self._ref_address", "def build_address(record):\n pass", "def street_address(self):\n if \"streetAddress\" in self._prop_dict:\n return self._prop_dict[\"streetAddress\"]\n else:\n return None", "def __str__(self):\n return str(self.address)", "def get_str_address(address):\n return \\\n get_ob_value_primitive(address, 'AddrLine1', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine2', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'AddrLine3', exception_return_value='') + ', ' + \\\n get_ob_value_primitive(address, 'City', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'County', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'StateProvince', exception_return_value='') + ' ' + \\\n get_ob_value_primitive(address, 'ZipPostalCode', exception_return_value='')", "def get_full_address(postal_code):\n from app.models.address import Address\n\n address = Address.find_one(postal_code)\n if address:\n return jsonify(address.to_dict())\n return jsonify(dict())", "def __str__(self):\n if self._street_name != self.DEFAULT_STREET_NAME and \\\n self._house_num != self.DEFAULT_HOUSE_NUM and \\\n self._apt_num != self.DEFAULT_APT_NUM:\n address = f\"\\n{self._house_num} {self._street_name} Street, \" \\\n f\"#{self._apt_num}\"\n return address\n else:\n return \"<None>\"", "def _get_address(self, address1, address2):\n return f'{address1}\\n{address2}' if address2 else address1", "def find(self, text: unicode) -> ghidra.program.model.address.Address:\n ...", "def address_1(self):\n return self._address_1", "def getOneAddress(results):\n return getOnePayload(results).dottedQuad()", "def toAddr(self, addressString: unicode) -> ghidra.program.model.address.Address:\n ...", "def get_remit_to_address(self): \n return self.remit_to_address", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def address2(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address2\")", "def get_address(self):\n return self.get_ipv4_address()", "def nameToAddress(self, name):\n pass", "def get_address(self):\n return logic.address(self.get_program())", "def address_str(self) -> str | None:\n pass", "def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2", "def street_address():\r\n\r\n return _random.choice(\r\n [\r\n '%d-%d %s' % (\r\n _random.randrange(999),\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%d %s' % (\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%s %d, %s' % (\r\n 'P.O. 
Box',\r\n _random.randrange(999),\r\n street_name()\r\n )\r\n ]\r\n )", "def address(self):\n ...", "def address(self):\n return f\"{self._type}.{self._id}\"", "def get_address(address_file):\n if not path.exists(address_file) :\n print(\"file not found :\", address_file)\n return None\n addr_file = open(address_file,'r')\n address = addr_file.readlines()\n return address[0]", "def _get_address(self, address_tag, hdr):\n\n # try to find all the span tags in the address tag, the span tags\n # include all the address information we need \n try:\n elements = address_tag.find_all('span')\n\n # scrape the text out of the span tags and remove\n # all the whitespaces and punctuation marks\n address = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n city = elements[1].get_text().strip()\n state = elements[2].get_text().strip()\n zipcode = elements[3].get_text().strip()\n return address, city, state, zipcode\n # however, sometimes the address tag does not include the street\n # info, in this case, use the text in the header tag, which serves\n # as a replacement for the address \n except:\n address = hdr.get_text()\n elements = address_tag.find_all('span')\n city = elements[0].get_text()\\\n .replace(',','')\\\n .strip()\n state = elements[1].get_text().strip()\n zipcode = elements[2].get_text().strip()\n return address, city, state, zipcode", "def _get_addr(self, protocol, address):\n if address:\n return address[0]\n else:\n return protocol.transport.getPeer().host", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def find_address():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f'{business_object[\"name\"]}\\'s address is:'\n f'{business_object[\"address\"]}, {business_object[\"city\"]} '\n f'{business_object[\"state\"]}')" ]
[ "0.7992872", "0.7607708", "0.75525093", "0.73847014", "0.7379411", "0.737529", "0.73570526", "0.7323264", "0.722714", "0.71870494", "0.7097504", "0.7097504", "0.7092015", "0.7092015", "0.7092015", "0.7066334", "0.7032395", "0.69323504", "0.69010234", "0.69005394", "0.6900194", "0.6896562", "0.6862433", "0.6844339", "0.68338037", "0.6796834", "0.67907894", "0.6758096", "0.6752767", "0.674012", "0.67384005", "0.6730269", "0.67181826", "0.67061025", "0.66939443", "0.6683053", "0.66710603", "0.66710603", "0.665227", "0.66414404", "0.6627911", "0.660043", "0.6597471", "0.6590651", "0.6590651", "0.6590651", "0.6590651", "0.6590651", "0.6580112", "0.65726507", "0.65587896", "0.65587896", "0.6551523", "0.6538985", "0.6536171", "0.6530551", "0.6524091", "0.651884", "0.6511891", "0.64819187", "0.6475954", "0.64576924", "0.64416873", "0.64411414", "0.64342606", "0.64243925", "0.64243925", "0.64241105", "0.64206314", "0.64177287", "0.63962877", "0.6392082", "0.6380069", "0.6376215", "0.63739043", "0.63729084", "0.63466203", "0.63202864", "0.6304865", "0.6298065", "0.6296673", "0.62927943", "0.6276829", "0.6274931", "0.6268399", "0.62418216", "0.62418216", "0.6238508", "0.62215567", "0.61779886", "0.61771286", "0.61770326", "0.61680007", "0.61437714", "0.613672", "0.61296153", "0.6124873", "0.6104356", "0.6081901", "0.60590684" ]
0.6411183
70
Get the mail of person
def mail(self): return self._mail
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recipient(self, recipient, model):\n if recipient == \"hod\":\n workmails = model.address_id, model.work_email\n workmail = {workmail for workmail in workmails if workmail}\n workmail = workmail.pop() if workmail else model.work_email\n if not isinstance(workmail, str):\n try:\n return workmail.email\n except:\n pass\n return workmail\n elif recipient == \"department_manager\":\n manager = model.manager_id\n return manager.work_email or manager.address_id.email", "def get_email(obj):\r\n return obj.user.email", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None", "def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]", "def get_email(self):\n return self.email", "def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to", "def email(self):\n return self._dict.get('email')", "def get_email(self):\n return self._email", "def getEmail(self):\n\t\treturn self.Email", "def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)", "def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)", "def get_default_email(self):\n email = '[email protected]'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email", "def getEmail(self):\n return self.email", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def email(self) -> str:\n return pulumi.get(self, \"email\")", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def email(self, instance):\r\n return instance.user.email", "def get_email(self, token):\n resp = requests.get(self.emails_url, params={\"access_token\": token.token})\n emails = resp.json().get(\"values\", [])\n email = \"\"\n try:\n email = emails[0].get(\"email\")\n primary_emails = [e for e in emails if 
e.get(\"is_primary\", False)]\n email = primary_emails[0].get(\"email\")\n except (IndexError, TypeError, KeyError):\n return \"\"\n finally:\n return email", "def get_email():\n headers = request.headers\n token = headers['Authorization'].split()[1]\n return Token.objects(access_token=token).first().email", "def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address", "def email(self):\n return self.__email", "def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")", "def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)", "def getEmail(self):\n return self.__email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def email(self):\n return self._email", "def getMailRecipient(self, mail=None):\n\n to_field_list = []\n if mail is not None:\n message = message_from_string(mail)\n to_field = message.get('To')\n to_field_list = self.email_pattern.findall(to_field)\n return to_field_list", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def get_email_for_nickname(cls, nickname):\n account = cls.get_account_for_nickname(nickname)\n if account is None:\n return None\n return account.email", "def Email(self, default=None):\n return self.data.get('email', default)", "def get_my_email():\n return check_output(['git', 'config', '--get',\n 'user.email']).strip().decode('utf-8')", "def get_donor_email(self):\n input_name = self.get_donor()\n if input_name in self.all_donors:\n print(self.r.hget(input_name, 'email'))", "def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")", "def get_email(self, token, uid):\n\n email_info_resp = get_remote(get_config('login.weibo.email_info_url') + token)\n email_info_resp_json = json.loads(email_info_resp)\n\n if email_info_resp_json.get(\"error\") is not None:\n raise Exception(email_info_resp_json)\n\n return email_info_resp_json['email']", "def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]", "def get_email(self):\n # Scraping the Email Address from Contact Info (email)\n\n # > click on 'Contact info' link on the page\n # self.browser.execute_script(\n # \"(function(){try{for(i in document.getElementsByTagName('a')){let el = document.getElementsByTagName('a')[i]; \"\n # \"if(el.innerHTML.includes('Contact info')){el.click();}}}catch(e){}})()\")\n # time.sleep(loading_pause_time)\n #\n # # > gets email from the 'Contact info' popup\n # try:\n # email = self.browser.execute_script(\n # \"return (function(){try{for (i in document.getElementsByClassName('pv-contact-info__contact-type')){ let \"\n # \"el = \"\n # \"document.getElementsByClassName('pv-contact-info__contact-type')[i]; if(el.className.includes(\"\n # \"'ci-email')){ \"\n # 
\"return el.children[2].children[0].innerText; } }} catch(e){return '';}})()\")\n #\n # self.browser.execute_script(\"document.getElementsByClassName('artdeco-modal__dismiss')[0].click()\")\n # except:\n # email = 'N/A'", "def customer_email(customer):\n return customer.get(\"email\")", "def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "def mail_nickname(self):\n if \"mailNickname\" in self._prop_dict:\n return self._prop_dict[\"mailNickname\"]\n else:\n return None", "async def get_guardian_email(guardian_id: UUID, angel_name: str) -> str:\n try:\n user = await User.get(id=guardian_id)\n except DoesNotExist:\n return False\n\n angels = await user.fetch_related(\"angels\")\n for angel in angels:\n if angel.name == angel_name:\n return user.email\n return False", "def GetMailInfo(self, UserList, LearnerNumber):\n MailTo = str(UserList[LearnerNumber]['ID'])\n MailAddr = UserList[LearnerNumber]['MailAddr']\n return MailTo, MailAddr", "def get_member_email(username=None, portal_membership=None):\n\n if portal_membership is None:\n portal = getSite()\n portal_membership = getToolByName(portal, 'portal_membership', None)\n if portal_membership is None:\n # unit test or non-CMF site\n return None\n\n if username is None:\n member = portal_membership.getAuthenticatedMember()\n else:\n member = portal_membership.getMemberById(username)\n if member is None:\n if username is not None and '@' in username:\n # Use case: explicitly adding a mailing list address\n # to the watchers.\n return username\n return None\n\n try:\n email = member.getProperty('email')\n except Unauthorized:\n # this will happen if CMFMember is installed and the email\n # property is protected via AT security\n email = member.getField('email').getAccessor(member)()\n return email", "def get_author_email(author, email):\n return encode_email(email, author, 'nav')", "def get_full_name(self):\n\t\treturn self.email", "def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")", "def get(self, email):\n adm = Administration()\n pers = adm.get_person_by_google_mail(email)\n return pers", "def email(self) -> str:\n return self._email", "def get_email(nt_id):\n\n secret = json.loads(get_secret())\n username = secret['account_name']\n pw = secret['password']\n data = {\n \"domain\": \"corporate.t-mobile.com\",\n \"base_dname\": \"OU=Production,OU=Users,OU=Accounts,DC=Corporate,DC=T-Mobile,DC=com\",\n \"bind_dname\": \"CN=%s,OU=LDAPS,OU=Non-production,OU=Services,OU=Accounts,DC=corporate,DC=t-mobile,DC=com\" % username,\n \"password\": pw,\n \"obj_name\": nt_id,\n \"obj_class\": \"user\",\n \"attributes\": [\"mail\"],\n }\n lambda_client = boto3.client('lambda')\n invoke_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"query_ldap\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(data)\n )\n if (\"FunctionError\" not in invoke_response):\n data = ast.literal_eval(json.load(invoke_response['Payload'])['body'])\n print(data)\n return data[0][0][1]['mail'][0]\n\n return None", "def get_domain(self, email):\r\n try:\r\n return str(email).split('r@')[1]\r\n except:\r\n return None", "def get_receive_mail(self):\n return self.__mail", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")", "def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''", "def recipient(self):\n\t\trecipient = re.search(r\"([Tt]\\s*o )(.*)(from.*)\",self.raw_text()[:250])\n\t\t\n\t\tif recipient: \t\n\t\t\trecipient = recipient.group(2) \t\n\t\t\trecipient = re.sub(r\"(\\w+\\s*\\w+),.*\",r\"\\1\",recipient) #attempting to clear out titles and such\n\t\t\t# recipient = re.sub(r\"([sS]ecre[a-z]+ of the \\w+).*\",\"Secretary of the Navy\",recipient) \t\n\t\t\treturn recipient\n\t\treturn \"Unknown\"", "def _get_contact_email(app):\n return app[CONTACT_EMAIL_KEY]", "def get_salutation(email):\n return email.split(\"@\")[0].replace(\".\", \" \").title()", "def other_mail_address(self):\n return (self.mail_address_2 + ' ' + \n self.mail_address_3 + ' ' +\n self.mail_address_4)", "def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)", "def ad_rep_email(obj):\n return '%s' % obj.ad_rep.email", "def displayname(self):\n return self.email", "def log_useremail(self):\n return self.user.email", "def get_email(self, company_code):\n return self.__get(\"export/table=name&search=\" + urllib.quote_plus(\"code=`\" + company_code + \"`\") + \"&format=[email]\").text", "def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email", "def get_info(email):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n return user", "def get_user_email():\n email = input(\"Email address: \")\n menu.option_to_exit(email)\n try:\n if not is_valid_email(email):\n raise ValueError\n except ValueError:\n print(\"\\nOoops! 
That doesn't look like an email address.\\n\"\n \"Please try again.\\n\")\n return get_user_email()\n else:\n return email", "def ___str__(self):\n return self.email", "def get_primary_email(lookup_value, lookup_type=\"id\"):\n lookup_type = _validate_lookup_type(lookup_type, 'email')\n user_data = core.get_data('people', lookup_value, lookup_type, return_json=True)\n primary_email = user_data['emails'][0]['value']\n return primary_email", "def get_email(self,text):\r\n return self.driver.find_element(*SinginPage.email).send_keys(text)", "def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")", "def email_address(self) -> str:\n return self._email_address", "def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def get_email(khoros_object, user_settings=None, user_id=None, login=None, first_name=None, last_name=None,\n allow_multiple=False, display_warnings=True):\n user_settings = process_user_settings(user_settings, user_id=user_id, login=login,\n first_name=first_name, last_name=last_name)\n where_clause = _get_where_clause_for_email(user_settings)\n return _get_user_identifier(khoros_object, 'email', where_clause, allow_multiple, display_warnings)", "def email_user(user, template_path, from_address, context_dict):\n return email_list([user.email], template_path, from_address, context_dict)", "def get_xero_contact(user):\r\n\r\n if \"PORTAL_XERO_CONSUMER_KEY\" in os.environ:\r\n with open(xero_rsa) as keyfile:\r\n rsa_key = keyfile.read()\r\n credentials = PrivateCredentials(\r\n os.environ.get(\"PORTAL_XERO_CONSUMER_KEY\"), rsa_key\r\n )\r\n xero = Xero(credentials)\r\n email = xero.contacts.filter(EmailAddress=user.profile.email)\r\n name = xero.contacts.filter(Name=user.profile.get_full_name())\r\n\r\n if email:\r\n return email\r\n\r\n elif name:\r\n return name\r\n\r\n return None\r\n\r\n else:\r\n return \"Invalid Xero API details.\"", "def ldap_get_email(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n alias = result.get(\"alias\")[1]\n return alias\n\n return None", "def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")", "def get_sender_email(mail: Message) -> str:\n sender_pattern = re.compile(\"^(?P<name>.*)\\s<(?P<email>.*)>$\")\n from_header = mail['From'] # type: str\n\n sender = sender_pattern.match(from_header)\n if not sender:\n raise KeyError(\"Invalid From header on email\")\n\n return sender.group('email')", "def get_user_email(username):\r\n return '{0}@test.com'.format(username)", "def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None", "def GetEmail(prompt):\n last_email_file_name = os.path.expanduser(\"~/.last_codereview_email_address\")\n last_email = \"\"\n if os.path.exists(last_email_file_name):\n try:\n last_email_file = open(last_email_file_name, \"r\")\n last_email = last_email_file.readline().strip(\"\\n\")\n last_email_file.close()\n prompt += \" [%s]\" % last_email\n except IOError, e:\n pass\n email = raw_input(prompt + \": \").strip()\n if email:\n try:\n last_email_file = open(last_email_file_name, \"w\")\n last_email_file.write(email)\n last_email_file.close()\n except IOError, e:\n pass\n else:\n email = last_email\n return email", "def 
extract_email_address(logpart):\n # print \"Parsing for email address: {}\".format(logpart)\n return(logpart.split('<')[1].split('>')[0])", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email", "def __str__(self):\n return self.email" ]
[ "0.7303463", "0.7285791", "0.72157747", "0.72157747", "0.7144919", "0.70124036", "0.6947377", "0.6876924", "0.6824107", "0.6776341", "0.67702425", "0.67517656", "0.6741758", "0.6727407", "0.67177004", "0.6696528", "0.6696528", "0.6696528", "0.6696325", "0.6682962", "0.6642721", "0.6602651", "0.6602456", "0.6588952", "0.6582581", "0.65704757", "0.65557754", "0.6546735", "0.6546735", "0.6546735", "0.6546735", "0.6521971", "0.65200293", "0.65200293", "0.65200293", "0.65200293", "0.65140635", "0.6497599", "0.64836365", "0.6478929", "0.6448881", "0.6439925", "0.6431865", "0.6402163", "0.640202", "0.6385245", "0.6359115", "0.6359115", "0.63565403", "0.63558125", "0.63547397", "0.63546014", "0.63258296", "0.6287964", "0.62783176", "0.6245823", "0.6236907", "0.6232957", "0.6226827", "0.6215353", "0.6215353", "0.6215353", "0.6215353", "0.6215353", "0.6215353", "0.6215353", "0.62148607", "0.62121093", "0.61991626", "0.6166893", "0.61656237", "0.6155506", "0.61312604", "0.6120064", "0.61008394", "0.6100045", "0.60917294", "0.6064425", "0.6061892", "0.6024859", "0.5985376", "0.59717894", "0.59620863", "0.5955169", "0.5945049", "0.59402", "0.59402", "0.59305125", "0.5911715", "0.59104186", "0.5905166", "0.5902094", "0.5893868", "0.58910334", "0.5875857", "0.5854164", "0.58401155", "0.58395475", "0.58395475", "0.58395475" ]
0.6388883
45
Get the url of person
def url(self): return self._url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getURLForThing(thing):", "def get_absolute_url(self):\n if self.kind == \"persona_profile\":\n p = Persona.query.filter(Persona.profile_id == self.id).first()\n return url_for(\"persona\", id=p.id)\n elif self.kind == \"group_profile\":\n g = Group.query.filter(Group.profile_id == self.id).first()\n return url_for(\"group\", id=g.id)\n elif self.kind == \"index\":\n p = Persona.query.filter(Persona.index_id == self.id).first()\n return url_for(\"persona\", id=p.id)", "def url(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"url\" # pylint: disable=unsubscriptable-object\n )", "def Url(self) -> str:", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def url(self):\r\n return self.urlparts.geturl()", "def get_absolute_url(self):\n return '/profile/%s' % self.id", "def show_orion_url(self, obj):\n return obj.orion_url", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def geturl(self):\n return self.__url", "def get_url(self):\n return self.resource.url", "def get_absolute_url(self) -> str:\n return \"/users/%s/\" % self.email", "def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)", "def get_absolute_url(self):\n return reverse('profile', args=[str(self.id)])", "def url(self):\n return self.__values['url']", "def url(self):\n return self.data[\"attributes\"][\"URL\"]", "def get_url(self):\n return self._url", "def user_info_url(self):\n return self._user_info_url", "def url(self):\n return self.full()", "def url(self):\n url = self.url\n return url", "def get_absolute_url(self):\n\n return reverse('profile', args=[str(self.user.username)])", "def profile_url(self):\n return reverse(\"auth_profile\", args=[self.user.username])", "def _get_url(self, absolute):", "def get_uri(self):\n return self.url", "def url(self):\n return (urljoin(self.lodgeit.address, self.relative_url)\n if self.relative_url else None)", "def get_info_url(self):\n return self.get_info(\"URL\")", "def getUrl(self):\n return self.__get('url')", "def url(self) -> str:\n return self.url_as()", "def url(self):\n # type: () -> string_types\n return self._url", "def get_url():\r\n content = get_creds(CREDS_FILE)\r\n url = content[0]\r\n # get rid of trailing slash\r\n if url[len(url) - 1] == \"/\":\r\n return url[:len(url) - 1]\r\n return url", "def get_profile_url(lookup_value, lookup_type=\"id\"):\n if lookup_type == \"username\":\n username = lookup_value\n else:\n username = get_username(lookup_value, lookup_type)\n profile_url = f\"{base_url}/people/{username}\"\n return profile_url", "def get_url(self):\n return reverse(\"ui-accounts-retrieve\", args=[self.name])", "def url(self) -> Optional[str]:\n return pulumi.get(self, \"url\")", "def get_absolute_url(self):\n # return reverse('tutor-detail', args=[str(self.email_address)])\n return reverse('tutor-detail', args=[str(self.username)])", "def get_url(self, resource_name):\r\n return self.__resource_meta.get(resource_name,{}).get(\"url\", None)", "def get_room_url(request, inst):\n prefix = 'https' if request.is_secure() else 'http'\n return \"%s://%s.%s/room/%s/\" % (prefix, inst.name, settings.DOKKU_DOMAIN, inst.otree_room_name)", "def owner_url(self) -> str:\n return pulumi.get(self, \"owner_url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def get_url(self) -> str:\n return urljoin(self._base_url, self.url)", "def url(result):\n return result.entities.get(u'urls')", 
"def url(self, name):\n return self.path(name)", "def get_url(self, *args, **kwargs):\n raise NotImplementedError", "def url(self):\n return url_for_item(self.key)", "def url(self):\n return url_for_item(self.key)", "def getUrl(self):\n\n return self.toUrlForm()", "def get_url(self, item):\n config = {}\n uuid = self.data.get('uuid', None)\n obj = uuidToObject(uuid)\n if uuid and obj:\n config = copy.copy(self.get_config(obj))\n\n url = u'{0}{1}'.format(self.view_url(obj), item.id.value)\n if config.get('modify_url', True):\n url = u'{0}___{1}-{2}'.format(\n url,\n item.title.value,\n item.location.value,\n )\n return url", "def get_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://www.livestream.com/%s/video?clipId=%s' % (self.get_username(), self.get_video_id())", "def URL(self):\r\n return self._URL", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url", "def get_url(self) -> str:\n\n return self.__page_url", "def related_url(self) -> pulumi.Output[Sequence['outputs.RelatedUrlResponse']]:\n return pulumi.get(self, \"related_url\")", "def url (self):\n return Links.createURL('/')", "def getLink(self):", "def getUrl(self):\n return self.url", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def get_url(self):\n if self.object_id is None:\n return '{0}/{1}'.format(self.parent.get_url(), self.path)\n\n return '{0}/{1}/{2}'.format(self.parent.get_url(), self.path,\n self.object_id.replace('/', '-'))", "def url(self, part):\n return self._server.url(part, includeToken=True) if part else None", "def get_user_url(self):\n print(\"Please enter the a website you wish to monitor\")\n url = input().lower()\n return url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n\n return self._url", "def url(self):\n ...", "def getFullURL(self):\n return self.FullURL", "def get_redirect_url(self):\n return reverse(\"accounts:profile\",kwargs={\"username\": self.user.username})", "def url(self):\n\n if not hasattr(self, \"_url\"):\n query = db.Query(\"query_term u\", \"u.value\")\n query.join(\"query_term t\", \"t.doc_id = u.doc_id\")\n query.where(f\"u.path = '{self.URL_PATH}'\")\n query.where(f\"t.path = '{self.TERM_PATH}'\")\n query.where(query.Condition(\"t.int_val\", self.id))\n rows = query.execute(self.loader.cdr_cursor).fetchall()\n self._url = rows[0].value if rows else \"\"\n return self._url", "def get_absolute_url(self):\n return reverse(\"users:detail\", kwargs={\"username\": self.username})", "def get_absolute_url(self):\n return reverse(\"users:detail\", kwargs={\"username\": self.username})", "def get_absolute_url(self):\n return reverse(\"users:detail\", kwargs={\"username\": self.username})", "def get_absolute_url(self):\n return reverse('blogs-by-author', args=[str(self.id)])", "def requestURL(userID): #@NoSelf", "def url(self) -> str:\n return self.HTTP.url if self.HTTP else self._url", "def get_url(self, username):\n base_url = reverse(\n 
self.namespaced_url,\n kwargs={\n 'course_id': self.course_key,\n }\n )\n return f\"{base_url}?username={username}\"", "def get_absolute_url(self):\n # return reverse('tutor-detail', args=[str(self.email_address)])\n return reverse('school-detail', args=[str(self.slug)])", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def theLinky(self):\n theLink = self.absolute_url()\n return theLink", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url" ]
[ "0.72001034", "0.7117052", "0.71160406", "0.7037918", "0.7014563", "0.7014563", "0.6958404", "0.6889335", "0.6881808", "0.68336815", "0.68336815", "0.67946136", "0.67801034", "0.677251", "0.6765108", "0.6752407", "0.6691526", "0.6678413", "0.6663654", "0.6660274", "0.6658939", "0.66532606", "0.6641852", "0.6633203", "0.6621162", "0.65919477", "0.6574358", "0.6568192", "0.65634686", "0.6545753", "0.6541528", "0.6536462", "0.65108585", "0.64858806", "0.6477658", "0.6475036", "0.64719015", "0.6465833", "0.6462423", "0.6459869", "0.6459869", "0.6454374", "0.641542", "0.6405946", "0.6378189", "0.63633615", "0.63633615", "0.63605034", "0.63477767", "0.6347747", "0.63453776", "0.6338427", "0.6338427", "0.6338427", "0.63230884", "0.63190335", "0.6318932", "0.6317786", "0.63122606", "0.63027084", "0.63027084", "0.63027084", "0.63027084", "0.63027084", "0.63027084", "0.63027084", "0.6301013", "0.62933075", "0.6292725", "0.6291912", "0.6291912", "0.6291912", "0.62859595", "0.62781703", "0.6272026", "0.62672144", "0.62656224", "0.62656224", "0.62656224", "0.62646234", "0.6248472", "0.6246631", "0.6239265", "0.6230168", "0.6225581", "0.6225581", "0.62130475", "0.62090755", "0.62090755", "0.62090755", "0.61975193" ]
0.6515759
40
Model with no features.Always predicts a passenger did not survive.
def predictions_0(data): predictions=[] for _, passenger in data.iterrows(): #Predict the survival of 'passenger' predictions.append(0) #Return our predictions return pd.Series(predictions) #make the predictions predictions=predictions_0(data) ''' Question 1 Using the RMS Titanic data,how accurate would a prediction be that none of the passengers survived? Hint:Run the code cell below to see the accuracy of this prediction. ''' print accuracy_score(outcomes,predictions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_only(self):", "def nonlearning():\n\taT.featureAndTrain(['../../AudioData/chunked_data_sorted/pos', '../../AudioData/chunked_data_sorted/neg'], \n\t\t\t\t\t\t1.0, 1.0, aT.shortTermWindow, aT.shortTermStep, \n \"svm\", \"emotion_classifier\", True)", "def train_naive(): # add arguments as needed\n pass", "def predict(self):\n for column in self.data_to_predict.columns:\n if column not in list(self.selected_features_):\n self.data_to_predict.drop(column, axis=1, inplace=True)\n for column in list(self.selected_features_):\n if column not in self.data_to_predict.columns:\n self.data_to_predict.loc[:, column] = 0\n self.predictions = self.model.predict(\n self.data_to_predict[self.selected_features_])", "def predict_model():\n data = request.json\n\n if data:\n predictor.pred_dict[\"model\"] = data[\"model\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'", "def predictive_model (train_x, train_y, test_x, model_name):\n \n assert model_name in ['logisticregression', 'nn', 'randomforest',\n 'gaussiannb', 'bernoullinb', 'multinb',\n 'svmlin', 'gbm', 'extra trees',\n 'lda','passive aggressive', 'adaboost',\n 'bagging', 'xgb']\n \n # Define model\n if model_name == 'logisticregression':\n model = LogisticRegression()\n elif model_name == 'nn': \n model = MLPClassifier(hidden_layer_sizes=(200,200))\n elif model_name == 'randomforest': \n model = RandomForestClassifier()\n elif model_name == 'gaussiannb': \n model = GaussianNB()\n elif model_name == 'bernoullinb': \n model = BernoulliNB()\n elif model_name == 'multinb': \n model = MultinomialNB()\n elif model_name == 'svmlin': \n model = svm.LinearSVC()\n elif model_name == 'gbm': \n model = GradientBoostingClassifier() \n elif model_name == 'extra trees':\n model = ExtraTreesClassifier(n_estimators=20)\n elif model_name == 'lda':\n model = LinearDiscriminantAnalysis() \n elif model_name == 'passive aggressive':\n model = PassiveAggressiveClassifier()\n elif model_name == 'adaboost':\n model = AdaBoostClassifier()\n elif model_name == 'bagging':\n model = BaggingClassifier()\n elif model_name == 'xgb':\n model = XGBRegressor() \n \n # Train & Predict\n if model_name in ['svmlin', 'Passive Aggressive']: \n model.fit(train_x, train_y)\n test_y_hat = model.decision_function(test_x)\n \n elif model_name == 'xgb':\n model.fit(np.asarray(train_x), train_y)\n test_y_hat = model.predict(np.asarray(test_x))\n \n else:\n model.fit(train_x, train_y)\n test_y_hat = model.predict_proba(test_x)[:,1]\n \n return model, test_y_hat", "def _train_model(self):\n raise NotImplementedError()", "def model_fn(features,labels,mode,params):\n tf.logging.info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s ,shape = %s\" % (name,features[name].shape))\n\n input_ids = features['input_ids']\n input_mask = features['input_mask']\n segment_ids = features['segment_ids']\n label_ids = features['label_ids']\n if 'is_real_example' in features:\n is_real_example = tf.cast(features['is_real_example'],dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids),dtype=tf.float32)\n\n is_training = (mode == tf_estimator.estimator.ModeKeys.TRAIN)\n\n (total_loss,per_example_loss,probabilities,predictions) = \\\n create_model(albert_config,is_training,input_ids,input_mask,\n segment_ids,label_ids,num_labels,\n use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map,initialized_variable_names) = 
modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint,assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf_estimator.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(total_loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,use_tpu=False)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == tf_estimator.estimator.ModeKeys.EVAL:\n def metric_fn(per_example_loss,label_ids,logits,is_real_example):\n accuracy = tf.metrics.accuracy(\n labels=label_ids,predictions=predictions,\n weights=is_real_example\n )\n loss = tf.metrics.mean(\n values=per_example_loss,weights=is_real_example\n )\n return {\n 'eval_accuracy':accuracy,\n 'eval_loss':loss,\n }\n\n eval_metrics = metric_fn(per_example_loss,label_ids,predictions,is_real_example)\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf_estimator.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'probabilities':probabilities,\n 'predictions':predictions,\n },\n )\n\n return output_spec", "def train_X(self):\n raise Exception(\"You cannot train a base predictor.\")", "def fit_predict(self):\n raise AttributeError", "def ts_predict_with_no_decompose(self, model_name: str, train_params: dict, pred_params: dict):\n self.is_decompose = False\n if self.insample:\n self.train = self.data[:-self.future]\n self.test = self.data[-self.future:]\n else:\n self.train = self.data\n model = self.model_dispatcher.dispatch(model_name, component=self.RAW)\n model.set_parameters(self.train, train_params)\n self.no_decompose_predict = model.predict(self.future, pred_params)\n print(\"\\nEvaluation\")\n print(\"*\" * 40)\n if self.insample:\n self.eval(name=self.RAW, v1=self.test, v2=self.no_decompose_predict)\n else:\n print(\"Out-of-sample predictions. 
There is no RMSE or R-squared.\")\n print(\"*\" * 40)\n print()", "def naive_forecaster_model_with_regressor(data_longley):\n y_train, _, X_train, _ = data_longley\n model = NaiveForecaster()\n return model.fit(y_train, X_train)", "def trainModel( self, featureTrain, classTrain):", "def model_switch_to_training(self):\n pass", "def is_predict_only(self):\n return self.model.is_predict_only", "def _untrain(self):\n # untrain the mapper\n if self.__mapper is not None:\n self.__mapper.untrain()\n # let base class untrain as well\n super(MappedClassifier, self)._untrain()", "def predict(self):\n raise NotImplementedError", "def predict(self, model, x_test):\n pass", "def model_fn(features, labels, mode, params):\n # obtain the data\n _info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features['input_ids'] # [batch_size, seq_length]\n input_mask = features['input_mask'] # [batch_size, seq_length]\n\n # if mode != tf.estimator.ModeKeys.PREDICT:\n # # segment_idx = features['segment_dis']\n # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer\n # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels\n # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask\n # # next_sentence_labels = features['next_sentence_labels']\n # else:\n masked_lm_positions = features['masked_lm_positions']\n masked_lm_ids = features['masked_lm_ids']\n masked_lm_weights = features['masked_lm_weights']\n\n if bert_config.train_type == 'seq2seq':\n _info('Training seq2seq task.')\n elif bert_config.train_type == 'lm':\n _info('Training language model task.')\n \n # build model\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask)\n \n # compute loss\n loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.embedding_table,\n model.projection_table,\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN: \n # restore from the checkpoint,\n # tf.estimator automatically restore from the model typically,\n # maybe here is for restore some pre-trained parameters\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n _info('*** Trainable Variables ***')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))\n \n train_op = optimization.create_optimizer(\n loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)\n\n # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,\n # tf.train.get_or_create_global_step(),\n # num_train_steps,\n # end_learning_rate=0.0,\n # power=1.0,\n # cycle=False)\n # optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)\n \n def metric_fn(loss, label_ids, logits, is_real_example):\n \"\"\"\n Args:\n loss: tf.float32.\n label_ids: [b, s].\n logits: [b, s, v].\n \"\"\"\n # [b * s, v]\n logits = tf.reshape(logits, [-1, logits.shape[-1]])\n # [b * s, 1]\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # [b * s]\n label_ids = tf.reshape(label_ids, [-1])\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=loss)\n return {'eval_accuracy': accuracy, 'eval_loss': loss}\n \n eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)\n output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)\n\n return output_spec", "def train(self)->None:", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = (?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n 
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n \"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec", "def predict():\n if (not request.json):\n abort(400)\n \n product = {\n 'brand': request.json['brand'],\n 'category-1': request.json['category-1'],\n 'category-2': request.json['category-2'],\n 'category-3': request.json['category-3'],\n 'colour': request.json['colour'],\n 'fabric_type': request.json['fabric_type'],\n 'ftp_acrylic': request.json['ftp_acrylic'],\n 'ftp_cotton': request.json['ftp_cotton'],\n 'ftp_elastane': request.json['ftp_elastane'],\n 'ftp_linen': request.json['ftp_linen'],\n 'ftp_other': request.json['ftp_other'],\n 'ftp_polyamide': request.json['ftp_polyamide'],\n 'ftp_polyester': request.json['ftp_polyester'],\n 'ftp_polypropylene': request.json['ftp_polypropylene'],\n 'ftp_silk': request.json['ftp_silk'],\n 'ftp_viscose': request.json['ftp_viscose'],\n 'ftp_wool': request.json['ftp_wool'],\n 'gender': request.json['gender'],\n 'label': request.json['label'],\n 'made_in': request.json['made_in'],\n 'season': request.json['season'],\n 'size': 
request.json['size'],\n 'unspsc_code': request.json['unspsc_code'],\n 'weight': request.json['weight'],\n 'ML-model': request.json['ML-model']\n }\n\n product['co2_total'] = None\n ml_model = product.pop('ML-model', None)\n if (ml_model == None or ml_model == ''):\n print('Loading default model: LGBM')\n ml_model = 'lgbm_default'\n else:\n print(f'Loading model: {ml_model}')\n model = load_model(ml_model)\n print('Model loaded')\n \n pred_with_intervals = do_prediction_with_params(model, product, intervals=True)\n \n prediction = pred_with_intervals[0][0]\n percentile_5 = pred_with_intervals[0][1] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][1] is not None else None\n percentile_95 = pred_with_intervals[0][2] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][2] is not None else None\n result = {\n \"prediction\": prediction,\n \"5-percentile\": percentile_5,\n \"95-percentile\": percentile_95\n }\n \n print('CO2e prediction complete, returning result')\n print(result)\n \n resp = jsonify(result)\n resp.status_code = 201\n return resp", "def create_model(X_train, lyrs=[16], act=\"relu\", opt='Adam', dr=0.2):\n\n # set random seed for reproducibility\n seed(42)\n tf.random.set_seed(42)\n\n model = Sequential()\n\n # create first hidden layer\n model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act))\n\n # create additional hidden layers\n for i in range(1, len(lyrs)):\n model.add(Dense(lyrs[i], activation=act))\n\n # dropout\n model.add(Dropout(dr))\n\n # create output layer\n model.add(Dense(1, activation=\"sigmoid\")) # output layer\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n return model", "def pick_model(self):\n self.x = self.train[self.use_columns]\n try:\n self.x = pd.get_dummies(self.x)\n except:\n pass # if no categorical features\n self.final_columns = self.x.columns\n print(self.x.columns)\n self.scaler = StandardScaler()\n self.x = self.scaler.fit_transform(self.x)\n self.y = self.train['y']\n\n if len(np.unique(self.y))<50:\n print('Consider using classification, probably not continuos target variable!')\n\n # for picking the best model\n lr = Ridge(max_iter=1500)\n rf = RandomForestRegressor(n_estimators=500, max_depth=20, min_samples_leaf=3,\n max_features='auto', n_jobs=-1)\n svr = SVR(max_iter=-1)\n\n self.models = {'lr': lr, 'rf': rf, 'svr': svr}\n self.scores = {'lr': [], 'rf': [], 'svr': []}\n print('selecting model')\n for i, (train_index, test_index) in enumerate(self.kf.split(self.x, self.y)):\n x_tr, x_val = self.x[train_index], self.x[test_index]\n y_tr, y_val = self.y[train_index], self.y[test_index]\n if len(x_tr)>10000:\n print('reduced train size')\n y_tr.index, y_val.index = range(len(y_tr)), range(len(y_val))\n mask_train = np.random.choice(range(len(x_tr)),size=10000)\n x_tr, y_tr = x_tr[mask_train], y_tr[mask_train]\n for k, model in self.models.items():\n print('fold: ', i+1)\n print('model: ', k)\n model = clone(self.models[k])\n model.fit(x_tr, y_tr)\n p = model.predict(x_val)\n # score = mean_squared_error(y_val, p)\n score = mean_absolute_error(y_val, p)\n self.scores[k] = self.scores[k] + [score]\n\n self.best_score = 9e10\n self.old_score = 9e10\n self.best_model = ''\n self.old_model = ''\n for k, l in self.scores.items():\n mean = np.mean(l)\n if mean < self.best_score:\n self.old_score = self.best_score\n self.old_model = self.best_model\n self.best_score = mean\n self.best_model = k\n print(self.best_model, self.best_score)", "def train():\n pass", "def 
predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def predict(self, model, context, data):\n pass", "def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train_model(self):\r\n alpha, accuracy_rate = self.select_model()\r\n # Initialize logistic regression with alpha(learning rate)\r\n lg = logisticregression(C=alpha)\r\n # Train the model.\r\n lg.fit(self.training_data, self.target_data)\r\n # Save the trained model as .pkl file.\r\n joblib.dump(value=lg, filename=self.intention_id+'.pkl', compress=1)\r\n print \"Estimated Parameters of Logistic Regression\"\r\n # Estimated parameters of logistic regression.\r\n print lg.get_params()", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def predict(self, params, exog=None, *args, **kwargs):\n raise NotImplementedError # pragma: no cover", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def predict(model, features):\n result = model.predict(features)\n return result", "def fit_model(self):\n logger.info('Fitting model')\n if self.traj_dict is None:\n self.traj_dict 
= self.get_traj_dict()\n self.model.fit(self.traj_dict.values())", "def my_attack_model(features, labels, mode, params):\n # Create three fully connected layers.\n net = tf.feature_column.input_layer(features, params['feature_columns'])\n\n for units in params['hidden_units']:\n net = tf.layers.dense(net, units=units, activation=tf.nn.relu)\n net = tf.layers.batch_normalization(net, training=True)\n net = tf.nn.relu(net)\n \n # Compute logits (1 per class).\n logits = tf.layers.dense(net, params['n_classes'], activation=None)\n\n #logits = logits + 0.9\n\n # Compute predictions.\n predicted_classes = tf.argmax(logits, 1)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n 'class_ids': predicted_classes[:, tf.newaxis],\n 'probabilities': tf.nn.softmax(logits),\n 'logits': logits,\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n\n # Compute loss.\n loss = tf.losses.sparse_softmax_cross_entropy(labels=tf.argmax(labels,1), logits=logits)\n\n # Compute evaluation metrics.\n accuracy = tf.metrics.accuracy(labels=tf.argmax(labels,1),\n predictions=predicted_classes,\n name='acc_op')\n metrics = {'accuracy': accuracy}\n tf.summary.scalar('accuracy', accuracy[1])\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n\n # Create training op.\n assert mode == tf.estimator.ModeKeys.TRAIN\n # optimizer = tf.train.ProximalAdagradOptimizer(\n # learning_rate=params['learning_rate'],\n # l2_regularization_strength=0.001\n # )\n optimizer = tf.train.AdamOptimizer(\n learning_rate=tf.train.exponential_decay(\n learning_rate=params['learning_rate'],\n global_step=tf.train.get_global_step(),\n decay_steps=1000,\n decay_rate=0.96)) \n # optimizer = tf.train.RMSPropOptimizer(learning_rate=params['learning_rate'])\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)", "def model_fn(features, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"input_type_ids\"]\n # label_ids = features[\"label_ids\"]\n vocab = vocab_list\n vocab_size = len(vocab_list)\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n\n\n \n # TRAIN\n if not is_predicting:\n\n (loss, predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=FLAGS.use_tpu)\n\n # if mode == tf.estimator.ModeKeys.TRAIN:\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=loss,\n 
train_op=train_op,\n scaffold_fn=scaffold_fn)\n ## else:\n # return tf.estimator.EstimatorSpec(mode=mode,\n # loss=loss,\n # eval_metric_ops=eval_metrics)\n else:\n (predictions, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings)\n\n predictions = {\n 'probabilities': log_probs,\n 'predictions': predictions\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions, scaffold_fn=scaffold_fn)\n\n return output_spec if use_tpu else output_spec.as_estimator_spec()", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n input_type_ids = features[\"input_type_ids\"]\n\n model = modeling.BertModel(config=bert_config,\n is_training=False,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=input_type_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if mode != tf.estimator.ModeKeys.PREDICT:\n raise ValueError(\"Only PREDICT modes are supported: %s\" % (mode))\n\n tvars = tf.trainable_variables()\n scaffold_fn = None\n (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, \n init_checkpoint)\n if use_tpu:\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold() \n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape, init_string)\n\n all_layers = model.get_all_encoder_layers()\n\n predictions = {\"unique_id\": unique_ids}\n for (i, layer_index) in enumerate(layer_indexes):\n predictions[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, \n predictions=predictions, \n scaffold_fn=scaffold_fn)\n return output_spec", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n 
print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time", "def train_model(train: Train, tf: Featureset(\"transaction_features\")) -> Any:\n\n # We create the training and label data\n train_df = tf.to_pandas()\n X = train_df.drop([\"transactionId\", \"is_fraud\"], axis=1)\n Y = train_df[\"is_fraud\"]\n\n random_state = 13\n test_size = 0.2\n train.log_parameter(\"random_state\", random_state)\n train.log_parameter(\"test_size\", test_size)\n trainX, testX, trainY, testY = train_test_split(X, Y, test_size=test_size,\n random_state=random_state)\n\n # Here we register input & output of the train. Layer will use\n # this registers to extract the signature of the model and calculate\n # the drift\n train.register_input(trainX)\n train.register_output(trainY)\n\n max_depth = 3\n objective = 'binary:logitraw'\n train.log_parameter(\"max_depth\", max_depth)\n train.log_parameter(\"objective\", objective)\n\n # Train model\n param = {'max_depth': max_depth, 'objective': objective}\n dtrain = xgb.DMatrix(trainX, label=trainY)\n model_xg = xgb.train(param, dtrain)\n\n dtest = xgb.DMatrix(testX)\n preds = model_xg.predict(dtest)\n\n # Since the data is highly skewed, we will use the area under the\n # precision-recall curve (AUPRC) rather than the conventional area under\n # the receiver operating characteristic (AUROC). 
This is because the AUPRC\n # is more sensitive to differences between algorithms and their parameter\n # settings rather than the AUROC (see Davis and Goadrich,\n # 2006: http://pages.cs.wisc.edu/~jdavis/davisgoadrichcamera2.pdf)\n auprc = average_precision_score(testY, preds)\n train.log_metric(\"auprc\", auprc)\n\n # We return the model\n return model_xg", "def train_model(evidence, labels):\n model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = 1)\n model.fit(evidence,labels)\n return model", "def predict_proba(self):\n ...", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def train(self) -> Any:\n pass", "def create_model_testing(lyrs, act, opt='Adam', dr=0.2):\n\n # set random seed for reproducibility\n seed(42)\n tf.random.set_seed(42)\n\n # create sequential model\n model = Sequential()\n\n # create first hidden layer\n model.add(Dense(lyrs[0], input_dim=X_train.shape[1], activation=act))\n\n # create additional hidden layers\n for i in range(1,len(lyrs)):\n model.add(Dense(lyrs[i], activation=act))\n\n # add dropout, default is none\n model.add(Dropout(dr))\n\n # create output layer\n model.add(Dense(1, activation=\"sigmoid\")) # output layer\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n\n return model", "def eval(self):\n self.train(mode=False)", "def predict(self, review):\n raise NotImplementedError", "def forward_train(self, *args, **kwargs):\n pass", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n 
train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def predict(self, instances):\r\n raise NotImplementedError", "def TrainOneStep(self):\n pass", "def setup_to_transfer_learn(model):\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def predict(self, **kwargs):\n raise NotImplementedError", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n if FLAGS.is_train: \n iterations=120\n model_input = utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n # iterations=50\n # model_input=model_input[:,20:-30:5,:]\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n # print('model_input is', model_input)\n # print('vocab_size is',vocab_size)\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = AttentionLayers(1024,iterations,256)#256\n audio_attention = AttentionLayers(128,iterations,256/4)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n # print('vlad_video is',vlad_video)\n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n return aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)", "def train_model(X, y, model_type, ngram_type, label_type):\n assert(label_type in ['oh', 'ed'])\n assert(model_type in ['linear', 'mlp'])\n assert(ngram_type in ['word', 'char'])\n\n # tensorflow models aren't fork safe, which means they can't be served via uwsgi\n # as work around, we can serve a pure sklearn model\n # we should be able to find another fix\n\n if label_type == 'oh' and model_type == 'linear':\n\n y = np.argmax(y, axis = 1)\n\n clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', 
TfidfTransformer()),\n ('clf', LogisticRegression()),\n ])\n\n params = {\n 'vect__max_features': 10000,\n 'vect__ngram_range': (1,2),\n 'vect__analyzer' : ngram_type,\n 'tfidf__sublinear_tf' : True,\n 'tfidf__norm' :'l2',\n 'clf__C' : 10,\n }\n else:\n if label_type == 'oh':\n y = one_hot(y)\n print(np.unique(y))\n\n clf = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('to_dense', DenseTransformer()),\n ('clf', KerasClassifier(build_fn=make_mlp, output_dim = y.shape[1], verbose=False)),\n ])\n cv_results = pd.read_csv('cv_results.csv')\n query = \"model_type == '%s' and ngram_type == '%s' and label_type == '%s'\" % (model_type, ngram_type, label_type)\n params = cv_results.query(query)['best_params'].iloc[0]\n params = json.loads(params)\n print(\"parameters\", params)\n return clf.set_params(**params).fit(X,y)", "def model_thresold(X_train,y_train,X_test,t,model,**param):\n if model == 'randomforest':\n rf = RandomForestClassifier(param)\n rf.fit(X_train, y_train)\n y_pred = rf.predict_proba(X_test)\n y_pred = adjusted_classes(y_pred,t)\n else:\n lgb_class = lgb.LGBMClassifier(param)\n lgb_class.fit(X_train,y_train)\n y_pred = lgb_class.predict_proba(X_test)\n y_pred = adjusted_classes(y_pred,t)\n\n return y_pred", "def predict_evidences(self, X):", "def train_model(evidence, labels):\n\n model = KNeighborsClassifier(n_neighbors=1)\n model.fit(evidence, labels)\n \n return model", "def model_fn(features, labels, mode, params):\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, 
init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} {}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n # tf.logging.info(\"*** Features ***\")\n # for name in sorted(features.keys()):\n # tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n is_real_example = None\n if \"is_real_example\" in features:\n is_real_example = tf.cast(features[\"is_real_example\"], dtype=tf.float32)\n else:\n is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (\n assignment_map,\n initialized_variable_names,\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu\n )\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, loss=total_loss, train_op=train_op, scaffold=scaffold_fn\n )\n elif mode == tf.estimator.ModeKeys.EVAL:\n\n def metric_fn(per_example_loss, label_ids, probabilities, is_real_example):\n\n logits_split = tf.split(probabilities, num_labels, axis=-1)\n label_ids_split = tf.split(label_ids, num_labels, axis=-1)\n # metrics change to auc of every class\n eval_dict = {}\n for j, logits in enumerate(logits_split):\n label_id_ = tf.cast(label_ids_split[j], dtype=tf.int32)\n current_auc, update_op_auc = tf.metrics.auc(label_id_, logits)\n eval_dict[str(j)] = (current_auc, update_op_auc)\n eval_dict[\"eval_loss\"] = 
tf.metrics.mean(values=per_example_loss)\n return eval_dict\n\n\n eval_metrics = metric_fn(\n per_example_loss, label_ids, probabilities, is_real_example\n )\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics,\n scaffold=scaffold_fn,\n )\n else:\n out = {\n \"input_ids\": input_ids,\n \"label_ids\": label_ids,\n }\n all_layers = model.get_all_encoder_layers()\n for (i, layer_index) in enumerate(layer_indexes):\n out[\"layer_output_%d\" % i] = all_layers[layer_index]\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode, predictions=out, scaffold=scaffold_fn\n )\n return output_spec", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def __init__(self) :\n self.prediction_ = None", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(loss,\n learning_rate,\n num_train_steps,\n num_warmup_steps,\n use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(label_ids,\n predicted_labels)\n auc = tf.metrics.auc(label_ids, predicted_labels)\n recall = tf.metrics.recall(label_ids, predicted_labels)\n precision = tf.metrics.precision(label_ids, predicted_labels)\n true_pos = tf.metrics.true_positives(label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(label_ids,\n predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels,\n log_probs) = create_model(bert_model_hub, is_predicting,\n input_ids, input_mask, segment_ids,\n label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"", "def test_fit_predict() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def train_model(evidence, labels):\n model = 
KNeighborsClassifier(n_neighbors=1)\n #evidence is all TRAINING data, so we need to fit our classifier\n #to it\n model.fit(evidence, labels)\n return model\n #raise NotImplementedError", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def predict_api():\n pass", "def set_vanilla_model(self):\n logging.debug(\"Setting vanilla model\")\n # Build model\n\n ## Embedding Layer\n word_embedding_layer = self.embed_word()\n pos_embedding_layer = self.embed_pos()\n\n ## Deep layers\n latent_layers = self.stack_latent_layers(self.num_of_latent_layers)\n\n ## Dropout\n dropout = Dropout(self.pred_dropout)\n\n ## Prediction\n predict_layer = self.predict_classes()\n\n ## Prepare input features, and indicate how to embed them\n inputs_and_embeddings = [(Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"word_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"predicate_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"postags_inputs\"),\n pos_embedding_layer),\n ]\n\n ## Concat all inputs and run on deep network\n output = predict_layer(dropout(latent_layers(merge([embed(inp)\n for inp, embed in inputs_and_embeddings],\n mode = \"concat\",\n concat_axis = -1))))\n\n # Build model\n self.model = Model(input = map(itemgetter(0), inputs_and_embeddings),\n output = [output])\n\n # Loss\n self.model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n self.model.summary()\n\n # Save model json to file\n self.save_model_to_file(os.path.join(self.model_dir, \"model.json\"))", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n logits = create_model(\n bert_config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var 
in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"prediction\": tf.argmax(logits, axis=-1),\n }\n output_spec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n else:\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=3, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n label = features[\"label\"]\n\n loss = compute_loss(logits, label)\n predicted_classes = tf.argmax(logits, axis=-1)\n accuracy = tf.metrics.accuracy(labels=label, predictions=predicted_classes, name='acc_op')\n\n # global global_acc_list\n # global_acc_list.append(accuracy)\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n eval_metric_ops=metrics,\n train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n metrics = {'accuracy': accuracy}\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec", "def test_no_parameters_gets_error_message(self):\n res = predict_model.predict()\n assert res == self.err_msg", "def predict_random_forest(X_test, model):", "def easy_drive():\n model = Sequential()\n model.add(Dense(10, activation=\"relu\",input_dim=2))\n model.add(Dense(10, activation=\"relu\"))\n #model.add(Dropout(0.9))\n model.add(Dense(2))\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=['accuracy'])\n return model", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n input_mask = features[\"input_mask\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n is_prediction = (mode == tf.estimator.ModeKeys.PREDICT)\n\n (total_loss, per_example_loss, logits, probabilities) = create_model(\n bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,\n num_labels, use_one_hot_embeddings, is_prediction)\n\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, False)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n )\n elif mode == 
tf.estimator.ModeKeys.EVAL:\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=per_example_loss)\n eval_metrics = {\n \"eval_accuracy\": accuracy,\n \"eval_loss\": loss,\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics\n )\n else:\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"probabilities\": probabilities},\n export_outputs={'predict': tf.estimator.export.PredictOutput(outputs=probabilities)}\n )\n return output_spec", "def __setup_model(self, **kwargs):\n self.model_architecture = kwargs['model_architecture'].upper()\n self.model = Classifier.IMAGENET_MODELS[self.model_architecture](\n pretrained=True\n )\n\n if 'input_size' in kwargs: # Loading from a checkpoint\n self.input_size = kwargs['input_size']\n self.model.current_epoch = kwargs['current_epoch']\n\n else: # No checkpoint, will be creating a new classifier for the model\n # The number of features coming from the feature detector CNN\n if 'ALEXNET' in self.model_architecture:\n self.input_size = self.model.classifier[1].in_features\n elif 'VGG' in self.model_architecture:\n self.input_size = self.model.classifier[0].in_features\n elif 'DENSENET' in self.model_architecture:\n self.input_size = self.model.classifier.in_features\n\n # Freeze the feature detector parameters to prevent backpropagating\n # through them.\n for param in self.model.parameters():\n param.requires_grad = False\n\n self.model.current_epoch = 1\n\n self.output_size = kwargs['output_size']\n self.hidden_layers = kwargs['hidden_layers']\n self.learn_rate = kwargs['learn_rate']\n self.drop_p = kwargs['drop_p']\n\n self.model.class_to_idx = kwargs['class_to_idx']\n self.model.classifier = Network(self.input_size,\n self.output_size,\n self.hidden_layers,\n self.drop_p)\n\n if 'model_state_dict' in kwargs: # load the state from checkpoint\n self.model.load_state_dict(kwargs['model_state_dict'])\n\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(self.model.classifier.parameters(),\n lr=self.learn_rate)\n\n if 'optimizer_state_dict' in kwargs: # load the state from checkpoint\n self.optimizer.load_state_dict(kwargs['optimizer_state_dict'])", "def test_fit_predict() -> None:\n mapie = MapieClassifier()\n mapie.fit(X_toy, y_toy)\n mapie.predict(X_toy)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n 
#true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:\n indexer = Indexer()\n stop_words = set(stopwords.words('english'))\n punkt = (',', '.', '...', '?', '\\'', '\\'\\'', '!', ':', ';')\n # Initialize feature extractor\n if args.model == \"TRIVIAL\":\n feat_extractor = None\n elif args.feats == \"UNIGRAM\":\n # Generate vocabulary\n for ex in train_exs:\n for word in ex.words:\n if word.lower() not in stop_words and word.lower() not in punkt:\n indexer.add_and_get_index(word.lower())\n feat_extractor = UnigramFeatureExtractor(indexer)\n elif args.feats == \"BIGRAM\":\n # Generate vocabulary\n for ex in train_exs:\n for i in range(0, len(ex.words) - 1):\n if stop_words.__contains__(ex.words[i]) and stop_words.__contains__(ex.words[i + 1]) or (\n punkt.__contains__(ex.words[i]) or punkt.__contains__(ex.words[i + 1])):\n continue\n bigram = ex.words[i] + ' ' + ex.words[i + 1]\n indexer.add_and_get_index(bigram.lower())\n feat_extractor = BigramFeatureExtractor(indexer)\n elif args.feats == \"BETTER\":\n # Generate vocabulary\n cnt = Counter()\n for ex in train_exs:\n cnt.update(\n word.lower() for word in ex.words if word.lower() not in stop_words and word.lower() not in punkt)\n cnt = dict(cnt.most_common(int(cnt.__len__() * 0.75)))\n for keys in cnt.keys():\n indexer.add_and_get_index(keys)\n feat_extractor = BetterFeatureExtractor(indexer)\n else:\n raise Exception(\"Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system\")\n\n # Train the model\n if args.model == \"TRIVIAL\":\n model = TrivialSentimentClassifier()\n elif args.model == \"PERCEPTRON\":\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == \"LR\":\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\"Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system\")\n return model", "def baseline_model(optimizer='rmsprop', init='glorot_uniform', dropout=0.2):\n model = keras.models.Sequential()\n model.add(Dropout(dropout, input_shape=(12,)))\n model.add(Dense(12, input_dim=12, kernel_initializer=init, activation='relu'))\n model.add(Dropout(dropout))\n model.add(Dense(1, kernel_initializer=init))\n # Compile model\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n return model", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n 
#LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def predict(self, X):", "def predict(self, X):", "def train(self, x={}, **kwargs):\n return 0", "def predict():\n import trace\n trace.predict()", "def modelo(hidden_layers=[1], activation='tanh',features=1, \r\n beta_1=0.9, beta_2=0.999,lr=0.001, decay=1e-6, dropout=0):\r\n \r\n input_layer = layers.Input(shape=(features,))\r\n vmiddle = layers.Dense(hidden_layers[0], \r\n kernel_initializer='random_uniform')(input_layer)\r\n vmiddle = layers.Activation(activation)(vmiddle)\r\n vmiddle = layers.Dropout(dropout)(vmiddle)\r\n \r\n if len(hidden_layers) != 1:\r\n\r\n for item in range(1,len(hidden_layers)):\r\n vmiddle = layers.Dense(hidden_layers[item], \r\n kernel_initializer='random_uniform')(vmiddle)\r\n vmiddle = layers.Activation(activation)(vmiddle)\r\n vmiddle = layers.Dropout(dropout)(vmiddle)\r\n vmiddle =layers.Dense(1, kernel_initializer='random_uniform')(vmiddle)\r\n vexit =layers.Activation('sigmoid')(vmiddle)\r\n \r\n else:\r\n vmiddle =layers.Dense(1, kernel_initializer='random_uniform')(vmiddle)\r\n vexit =layers.Activation('sigmoid')(vmiddle)\r\n\r\n model = models.Model(inputs=input_layer, outputs=vexit)\r\n model.compile(loss='binary_crossentropy', \r\n optimizer=optimizer.Adam(beta_1=beta_1, beta_2=beta_2, lr=lr, decay=decay,), \r\n metrics=['accuracy'])\r\n \r\n return model", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def train():\n # YOUR TRAINING CODE GOES HERE", "def create_model(self):\n self.model = None\n pass", "def create_model(self, model_input, vocab_size, num_frames, **unused_params):\n num_frames_t=num_frames\n num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)\n feature_size = model_input.get_shape().as_list()[2]\n iterations=5#150\n attention_size=8\n if FLAGS.is_train: \n iterations=120\n model_input = utils.SampleRandomFrames(model_input[:,15:,:], num_frames-15-15,\n iterations)\n model_input=model_input+tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=1e-3, dtype=tf.float32)\n\n aggregated_model = getattr(video_level_models,\n FLAGS.video_level_classifier_model)\n\n video_attention = MultiAttentionLayers(1024,iterations,256,attention_size)#256\n audio_attention = MultiAttentionLayers(128,iterations,256/4,attention_size)#256/4\n\n model_input = slim.batch_norm(\n model_input,\n center=True,\n scale=True,\n is_training=True,\n scope=\"model_input_bn\")\n\n with tf.variable_scope(\"video_Attention\"):\n attention_video = video_attention.forward(model_input[:,:,0:1024]) \n with tf.variable_scope(\"audio_Attention\"):\n attention_audio = audio_attention.forward(model_input[:,:,1024:])\n\n pooled=tf.concat([attention_video,attention_audio],axis=1)\n #instance_att#tf.reduce_mean(pooledi,axis=1)\n\n print('pooled is',pooled)\n pooled=tf.reshape(tf.transpose(pooled,perm=[0,2,1]),[-1,1152])\n dr2 = tf.get_variable(\"dr2\",\n [feature_size,1024],\n initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))\n pooled=tf.matmul(pooled,dr2)\n\n pooled = slim.batch_norm(\n pooled,\n center=True,\n scale=True,\n is_training=True,\n scope=\"pooled_bn\")\n\n gating_weights = tf.get_variable(\"gating_weights_2\",\n [1024, 1024],\n initializer = tf.random_normal_initializer(stddev=1 / 
math.sqrt(1024))) \n gates = tf.matmul(pooled, gating_weights) \n gates = slim.batch_norm(\n gates,\n center=True,\n scale=True,\n is_training=True,\n scope=\"gating_bn\")\n gates = tf.sigmoid(gates)\n pooled = tf.multiply(pooled,gates)\n\n results_temp=aggregated_model().create_model(\n model_input=pooled, vocab_size=vocab_size, **unused_params)\n results_temp['predictions']=tf.reduce_max(tf.reshape(results_temp['predictions'],[-1,attention_size,vocab_size]),axis=1)\n print(results_temp)\n return results_temp" ]
[ "0.7398218", "0.6274821", "0.60658264", "0.6061463", "0.6021806", "0.5954597", "0.5936909", "0.5917042", "0.59065545", "0.58827984", "0.5858222", "0.5814856", "0.5814105", "0.57477957", "0.5726169", "0.57161635", "0.5714358", "0.57077813", "0.568969", "0.56806886", "0.5669478", "0.5660518", "0.5647439", "0.56463206", "0.5640529", "0.56283927", "0.5618574", "0.561189", "0.5604516", "0.5603766", "0.56016314", "0.5597239", "0.55781496", "0.5577374", "0.55642563", "0.5559108", "0.5553547", "0.5550293", "0.5549026", "0.5548808", "0.5544032", "0.55422044", "0.553977", "0.5538755", "0.55355847", "0.55338067", "0.5533475", "0.5530844", "0.5530708", "0.5527729", "0.5526267", "0.5525381", "0.551965", "0.55017745", "0.5501308", "0.5501308", "0.5501308", "0.5501308", "0.5501308", "0.5500578", "0.5496427", "0.54946333", "0.5487868", "0.5482897", "0.5482708", "0.54789346", "0.54679203", "0.5465709", "0.5465709", "0.5465709", "0.5464189", "0.54623103", "0.5458813", "0.54586154", "0.5457345", "0.5447497", "0.54433566", "0.543973", "0.54392385", "0.5438526", "0.543781", "0.54368144", "0.54350793", "0.54340065", "0.54335505", "0.5430232", "0.5427608", "0.5421746", "0.54201853", "0.54201853", "0.5418464", "0.5417692", "0.54167986", "0.54160714", "0.5414119", "0.5414119", "0.5414119", "0.54139024", "0.54138327", "0.5411024" ]
0.5549601
38
Model with multiple features. Makes a prediction with an accuracy of at least 90%
def predictions_3(data):
    predictions=[]
    for _,passenger in data.iterrows():
        if passenger['Sex']=='female' or passenger['Sex']=='male' and passenger['Age']<16 and passenger['SibSp']<2:
            predictions.append(1)
        else:
            predictions.append(0)
    #Return our predictions
    return pd.Series(predictions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_only(self):", "def trainModel( self, featureTrain, classTrain):", "def predict(self, model, arg):\n prediction = model.predict(arg)\n\n return prediction\n\n #def getAccuracyScore(self, n_splits):\n \"\"\"\n Gives an cross-validated accuracy score for the new model.\n\n Inputs:\n n_splits: number of sets to split the data into\n\n Returns:\n score: the accuracy score of the model.\n \"\"\"", "def predict(model, features):\n result = model.predict(features)\n return result", "def predict(self, features):\n return self.search_results.predict(features)", "def train_model(self, model, hyperparameter_dict, feature_col):\n if model == 'random_forest':\n clf = RandomForestClassifier(max_depth=hyperparameter_dict['depth'], n_estimators = hyperparameter_dict['tree_num'], random_state = 2021)\n elif model == 'XGBoost':\n clf = XGBClassifier(objective='binary:logistic', random_state=2021, max_depth = hyperparameter_dict['depth'], n_estimators = hyperparameter_dict['tree_num'])\n elif model == 'gbt':\n clf = GradientBoostingClassifier(n_estimators = hyperparameter_dict['tree_num'], max_depth = hyperparameter_dict['depth'], random_state = 2021)\n else:\n print(f'please enter model among [\"random_forest\", \"XGBoost\", \"gbt\"]')\n # return\n X_train = self.get_train_X()[feature_col]\n y_train = self.get_train_y()\n X_val = self.get_val_X()[feature_col]\n y_val = self.get_val_y()\n X_test = self.get_test_X()[feature_col]\n y_test = self.get_test_y()\n clf.fit(X_train, y_train)\n now_depth = hyperparameter_dict['depth']\n now_tree_num = hyperparameter_dict['tree_num']\n print(f'depth is : {now_depth}, tree_num : {now_tree_num}')\n\n train_result = clf.predict_proba(X_train)\n train_result = train_result[:,1]\n fpr, tpr, thresholds = metrics.roc_curve(y_train, train_result)\n print(f'train auc : {metrics.auc(fpr, tpr)}')\n\n val_result = clf.predict_proba(X_val)\n val_result = val_result[:,1]\n fpr, tpr, thresholds = metrics.roc_curve(y_val, val_result)\n print(f'validation auc : {metrics.auc(fpr, tpr)}')\n\n test_result = clf.predict_proba(X_test)\n test_result = test_result[:,1]\n fpr, tpr, thresholds = metrics.roc_curve(y_test, test_result)\n print(f'Test auc : {metrics.auc(fpr, tpr)}')\n \"\"\"\n plot aoc curve and lift chart\n \"\"\"\n self.plot_roc_graph(clf, feature_col)\n self.set_model(clf)\n score_list = pd.Series(test_result, name='score').to_frame().reset_index(drop=True)\n test_key = self.get_test()[['idd', 'ft_data_dt']].reset_index(drop=True)\n test = pd.concat([test_key, score_list], axis = 1)\n self.set_final_score(test)\n \n self.plot_lift_chart(test_result, y_test.to_numpy(), 20, 1)\n print(f'bin of score from infected patients')\n self.plot_lift_chart(test_result, y_test.to_numpy(), 20, 0)\n print(f'bin of score from non-infected patients')\n print('')\n # save model\n filename = model + '.sav'\n print(f'save model to {filename}')\n pickle.dump(clf, open(filename, 'wb'))\n return clf, filename", "def predict_random_forest(X_test, model):", "def trainAndPredict(self):\r\n print(\"train\")\r\n filename= 'finalized_model.sav'\r\n # train the algorithm on training data and predict using the testing data\r\n model = self.svc_model.fit(self.X.T, self.Y)\r\n pickle.dump(model, open(filename, 'wb'))\r\n #model = pickle.load(open(filename, 'rb'))\r\n pred1 =model.predict(self.TestSet.T)\r\n # print the accuracy score of the model\r\n print(\"LinearSVC accuracy : \", accuracy_score(self.TestSetY, pred1, normalize=True))", "def train(self):\n y_list = []\n y_hat_list = []\n for 
ex_dict in ut.EXAMPLES_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return acc", "def train_model_for_shap(allFeatures, train_ml, test_ml, df_ml, classification_model, language_model, fold):\n # list of analyzed language models\n model = classification_model\n print(type(model).__name__)\n\n features = set(allFeatures[language_model][fold])\n preds = []\n trues = []\n\n train_index = train_ml[fold]\n test_index = test_ml[fold]\n\n train_data = df_ml[features].iloc[train_index]\n target_train_data = df_ml[\"target_ml\"].iloc[train_index]\n test_data = df_ml[features].iloc[test_index]\n target_test_data = df_ml.iloc[test_index][\"target_ml\"]\n model.fit(train_data, target_train_data)\n\n preds.append(model.predict(test_data).tolist())\n trues.append(target_test_data.tolist())\n\n print(language_model)\n mcc = metrics.matthews_corrcoef(y_true=sum(trues, []), y_pred=sum(preds, []))\n f1 = metrics.f1_score(y_true=sum(trues, []), y_pred=sum(preds, []), average=\"weighted\")\n print(\"MCC: \", round(mcc, 3))\n print(\"F1: \", round(f1, 3))\n return model, train_data, test_data", "def classification(self,a_train,a_test,c_train,c_test,classifier):\n le =LabelEncoder()\n le.fit(c_train)\n c_train = le.transform(c_train)\n c_test = le.transform(c_test)\n if classifier==\"GNB\": #Gaussian Naive Bayes\n gnb = GaussianNB()\n gnb.fit(a_train, c_train)\n c_pred = gnb.predict(a_test)\n elif classifier==\"DT\": #Decision Tree\n dt=DecisionTreeClassifier()\n dt.fit(a_train, c_train)\n c_pred = dt.predict(a_test)\n elif classifier==\"KNN\": #K-Next-Neighbors\n kn=KNeighborsClassifier(n_neighbors=5)\n kn.fit(a_train, c_train)\n c_pred = kn.predict(a_test)\n elif classifier==\"RF\": #Random Forest\n rf=RandomForestClassifier()\n rf.fit(a_train, c_train)\n c_pred = rf.predict(a_test)\n elif classifier==\"SVC\": # Support Vector Classifier\n \"\"\"\n SVC needs normalisation of Feature Values to scale of [-1,1] or [0,1] depending on sign of them\n \"\"\"\n if a_train.min()<0:\n mms = MinMaxScaler(feature_range=(-1,1))\n else:\n mms = MinMaxScaler()\n mms.fit(a_train)\n a_train = mms.transform(a_train)\n a_test = mms.transform(a_test)\n svc=SVC(cache_size=2000,C=1, probability=True,kernel='rbf')\n svc.fit(a_train,c_train)\n #c_pred = svc.predict(a_test) did not work, that's why it is predicted manual\n new_prob = svc.predict_proba(a_test)\n samples=new_prob.shape[0]\n c_pred= np.array\n for k in range(samples):\n c_pred=np.append(c_pred,new_prob[k].argmax())\n c_pred = c_pred[1:samples+1]\n elif classifier==\"DC\": #Dummy Classifier\n dc=DummyClassifier(strategy=\"uniform\")\n dc.fit(a_train, c_train)\n c_pred = dc.predict(a_test)\n elif classifier==\"GMM\": #Gaussian Mixture Modell\n #number of existing classes get passed to the GMM (n_classes)\n n_classes_train = len(np.unique(c_train))\n n_classes_test = len(np.unique(c_test))\n if n_classes_train>n_classes_test:\n n_classes = n_classes_train\n else:\n n_classes = n_classes_test\n #init_params='', because initial values get calculated manual\n gmm = GMM(n_components=n_classes,init_params='')\n #array of feature values of class i get extracted for further process\n gmm.means_=np.array([a_train[c_train==i,:].mean(axis=0) for i in xrange(n_classes)])\n gmm.weights_=np.array([a_train[c_train==i,:].shape[0]/float(c_train.shape[0]) for i in xrange(n_classes)])\n \n gmm_covars = np.zeros((a_train.shape[1]))\n for i in xrange(n_classes):\n valuesOfClassi = a_train[c_train==i,:]\n 
valuesOfClassi = np.asarray(valuesOfClassi).T\n matrixOfCov = np.cov(valuesOfClassi)+gmm.min_covar*np.eye(valuesOfClassi.shape[0])\n variance = np.array([matrixOfCov[j,j] for j in xrange(matrixOfCov.shape[0])])\n gmm_covars=np.vstack((gmm_covars,variance))\n gmm_covars=gmm_covars[1:,:] #deletes initial row with zeros\n \n gmm.covars_=gmm_covars\n c_pred = gmm.predict(a_test)\n \n c_pred=le.inverse_transform(c_pred)\n return c_pred", "def make_prediction(self, features):\r\n model = load_model(self.deep_neural_network_model, compile = True)\r\n prediction = model.predict(features)\r\n\r\n if prediction > 10:\r\n return 10\r\n else:\r\n return prediction", "def predict(self, predPoints=None):", "def ensemble_predictions(sampled_x,sampled_y,x_to_predict,top_models_name,top_models_params,n_to_average):\n preds=[None for _ in range(n_to_average)]\n for i in tqdm(range(n_to_average),desc=\"Ensemble models training\",\n position=0,total=n_to_average,\n leave=False, disable=False):\n major_model,specific_model=top_models_name[i].split(';')\n model = MldeModel(major_model, specific_model,\n model_params=top_models_params[i],\n training_params=DEFAULT_TRAINING_PARAMS[major_model],\n eval_metric=mse)\n model.train(sampled_x,sampled_y)\n preds[i],_= model.predict(x_to_predict)\n preds=np.array(preds)\n # preds=np.mean(preds,axis=0)\n return preds", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def predict(self, X):", "def predict(self, X):", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def calculatePrediction(self, a, X_train,x, t_train):\n pass", "def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], 
target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target", "def get_model_accuracy(self, features, labels):\n features_prediction = self._model.predict(features)\n accuracy = accuracy_score(features_prediction, labels)\n return accuracy", "def predict(self, instances):\r\n raise NotImplementedError", "def predict_all_features(input_data=\"not defined\"):\r\n X, y = splitting.get_x_and_y()\r\n output_dataframe = pd.DataFrame\r\n y_pred_dataframe = pd.DataFrame\r\n for actual_y in y:\r\n X_train, X_test, y_train, y_test = splitting.splitting_data(y=actual_y)\r\n y_pred, predicted_units = linear_regresstion_action(X_train, X_test, y_train, y_test, input_data)\r\n # not sure if scores[actual_y.name] works as well or even scores[actual_y]...\r\n # one need to test if input data is final\r\n output_dataframe[f\"{actual_y.name}\"] = predicted_units\r\n y_pred_dataframe[f\"{actual_y.name}\"] = y_pred\r\n return y_pred_dataframe, output_dataframe", "def train_predict_and_results(data, clf):\n tra_x, tst_x, tra_y, tst_y = data\n clf.fit(tra_x, tra_y)\n prd_y = clf.predict(tst_x)\n cnf = confusion_matrix(tst_y, prd_y)\n print (\"Classifier: %s \\tAccuracy score:%7.2f %%\"\n \"\\tTN:%5d FP:%5d FN:%5d TP:%5d\"\n % (clf.name, accuracy_score(tst_y, prd_y) * 100,\n cnf[0][0], 
cnf[0][1], cnf[1][0], cnf[1][1]))", "def train_model(classifier, X_train, y_train, X_test, y_test):\n\n # fit the training dataset on the classifier\n classifier.fit(X_train, y_train)\n \n # predict the labels on test dataset\n predictions = classifier.predict(X_test)\n \n return metrics.accuracy_score(predictions, y_test), metrics.confusion_matrix(predictions, y_test)", "def train_model(X_train, y_train, X_test, y_test, classifier, **kwargs):\r\n \r\n # instantiate model\r\n model = classifier(**kwargs)\r\n \r\n # train model\r\n model.fit(X_train,y_train)\r\n \r\n # check accuracy and print out the results\r\n fit_accuracy = model.score(X_train, y_train)\r\n test_accuracy = model.score(X_test, y_test)\r\n \r\n print(f\"Train accuracy: {fit_accuracy:0.2%}\")\r\n print(f\"Test accuracy: {test_accuracy:0.2%}\")\r\n \r\n return model", "def train_model_and_score(X,y_train):\n scaler = MinMaxScaler()\n X_scaled = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n #split train/test\n x_train,x_test,y_train,y_test = train_test_split(X_scaled,y_train,test_size=0.33,random_state =42)\n\n #train\n model.fit(x_train,y_train)\n\n #evaluation\n sc = model.score(x_test,y_test), model.score(x_train,y_train)\n\n print(sc)\n\n return model,sc", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n i1 = np.arange(0, len(targets), 1)\n i2 = np.argmax(predictions, axis = 1)\n accuracy = targets[i1, i2].sum()/targets.sum()\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def fit(self, X_raw, y_made_claim, y_claims_amount):\n\n # YOUR CODE HERE\n\n # Remember to include a line similar to the one below\n # X_clean = self._preprocessor(X_raw)\n \n # made_metrics = [tf.keras.metrics.AUC(name=\"auc\")]\n # def made_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,),kernel_regularizer=l2(l=0.05)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(64,activation=\"relu\",kernel_regularizer=l2(l=0.01)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(8,activation=\"relu\",kernel_regularizer=l2(l=0.001)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1,activation=\"sigmoid\")\n # ])\n\n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.BinaryCrossentropy(),\n # metrics=metrics)\n\n # return model\n\n # claim_metrics = [tf.keras.metrics.MeanSquaredError(name=\"mse\")]\n # def claim_nn_model(metrics, input_shape, lr=0.001):\n # model = tf.keras.Sequential([\n # tf.keras.layers.Dense(256,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(16,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # 
tf.keras.layers.Dense(8,activation=\"relu\",input_shape=(input_shape,)),\n # tf.keras.layers.Dropout(0.5),\n # tf.keras.layers.Dense(1)\n # ])\n \n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(lr=lr),\n # loss=tf.keras.losses.MeanSquaredError(),\n # metrics=metrics)\n # return model\n\n \n # X_1, X_1val, y_1, y_1val, y_2, y_2val = train_test_split(X_raw,y_made_claim,y_claims_amount,test_size=0.05)\n # X_1, drop_index = self._preprocessor(X_1, train=True)\n # y_1 = y_1.drop(drop_index).values\n # y_2 = y_2.drop(drop_index).values\n \n # X_1val, drop_index = self._preprocessor(X_1val, train=False)\n # y_1val = y_1val.drop(drop_index).values\n # y_2val = y_2val.drop(drop_index).values\n \n # self.scaler = StandardScaler()\n # X_1 = self.scaler.fit_transform(X_1)\n # X_1val = self.scaler.transform(X_1val)\n \n # #prepare for claim amount\n # X_2 = X_1[y_1==1]\n # y_2 = y_2[y_1==1]\n # X_2val = X_1val[y_1val==1]\n # y_2val = y_1val[y_1val==1]\n \n # self.y_mean = np.mean(y_2)\n # self.y_std = np.std(y_2)\n # y_2 = (y_2 - self.y_mean)/self.y_std\n # y_2val = (y_2val - self.y_mean)/self.y_std\n\n # #fit made claim\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, mode=\"min\", restore_best_weights=True)\n \n # self.Model_made = made_nn_model(made_metrics, X_1.shape[1], lr=0.0003)\n # History_made = self.Model_made.fit(X_1,y_1,\n # class_weight={0:1,1:10},\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data = (X_1val, y_1val),\n # epochs=200,\n # batch_size=512)\n\n # #fit claim amount\n # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, mode=\"min\", restore_best_weights=True)\n # logdir = \"log\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)\n \n # self.Model_claim = claim_nn_model(claim_metrics, X_2.shape[1], lr=0.0005)\n # History = self.Model_claim.fit(X_2,y_2,\n # callbacks=[tensorboard_callback, early_stopping],\n # validation_data=(X_2, y_2),\n # epochs=5000,\n # batch_size=512)\n \n \n X_1, drop_index = self._preprocessor(X_raw, train=True)\n y_1 = y_made_claim.drop(drop_index).values\n y_2 = y_claims_amount.drop(drop_index).values\n \n scaler = StandardScaler()\n clf_made = RandomForestClassifier(n_estimators=500,class_weight={0:1,1:10},n_jobs=-1,max_depth=10,max_features=33,min_samples_leaf=30)\n self.Model_made = Pipeline([(\"scale\",scaler),(\"clf\",clf_made)])\n self.Model_made.fit(X_1,y_1)\n #self.Model_made = fit_and_calibrate_classifier(self.Model_made, X_1, y_1)\n \n # #prepare for claim amount\n X_2 = X_1[y_1==1]\n y_2 = y_2[y_1==1]\n \n self.y_mean = np.mean(y_2)\n self.y_std = np.std(y_2)\n y_2 = (y_2 - self.y_mean)/self.y_std\n\n clf_claim = RandomForestRegressor(n_estimators=500,n_jobs=-1,max_depth=10,max_features=30,min_samples_leaf=70)\n self.Model_claim = Pipeline([(\"scale\",scaler),(\"clf\",clf_claim)])\n self.Model_claim.fit(X_2,y_2)\n \n\n return None", "def model_accuracy(predict, y):\n true_predict = (predict.argmax(1) == y.argmax(1)).float()\n acc = true_predict.sum() / len(true_predict)\n return acc", "def __train_and_predict(self, X_train, y, X_test):\n self.model.fit(X_train, y, eval_metric='auc')\n prediction_probs = self.model.predict_proba(X_train)[:, 1]\n print \"Training auc = %f\" % roc_auc_score(y, prediction_probs)\n 
self.__write_csv(prediction_probs,\n X_train.shape[0], self.train_out_file)\n\n prediction_probs = self.model.predict_proba(X_test)[:, 1]\n self.__write_csv(prediction_probs,\n X_test.shape[0], self.test_out_file)\n\n self.feature_imp()", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def train(self, features, labels):\n pass", "def predict(self, **kwargs):\n raise NotImplementedError", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.compat.v1.metrics.accuracy(label_ids, predicted_labels)\n #f1_score = tf.contrib.metrics.f1_score(\n # label_ids,\n # predicted_labels)\n #auc = tf.metrics.auc(\n # label_ids,\n # predicted_labels)\n #recall = tf.metrics.recall(\n # label_ids,\n # predicted_labels)\n #precision = tf.metrics.precision(\n # label_ids,\n # predicted_labels)\n #true_pos = tf.metrics.true_positives(\n # label_ids,\n # predicted_labels)\n #true_neg = tf.metrics.true_negatives(\n # label_ids,\n # predicted_labels)\n #false_pos = tf.metrics.false_positives(\n # label_ids,\n # predicted_labels)\n #false_neg = tf.metrics.false_negatives(\n # label_ids,\n # predicted_labels)\n return {\n \"eval_accuracy\": accuracy,\n # \"f1_score\": f1_score,\n #\"auc\": auc,\n # \"precision\": precision,\n # \"recall\": recall,\n # \"true_positives\": true_pos,\n # \"true_negatives\": true_neg,\n # \"false_positives\": false_pos,\n # \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, 
predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def evaluate_model(model, X_test_input, y_test_input):\r\n pred_class = [model.classes_[i] for i in model.predict_proba(X_test_input).argmax(axis=-1)]\r\n pred_accuracy = np.sum(np.array(y_test_input)==np.array(pred_class))/len(pred_class)\r\n return pred_class, pred_accuracy", "def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError", "def train_model(algorithm, X_train, y_train, X_test, y_test, cv_type='rand', transformation_type='tf'):\n \n model = algorithm(X_train, y_train, cv_type=cv_type)\n model_preds = model.predict(X_test)\n model_score = f1_score(y_test, model_preds, average='weighted')\n \n return model, model_score, transformation_type", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predict(self, xs, **kwargs):", "def model(features, test_features, encoding='ohe', n_folds=5):\n\n # Extract the ids\n train_ids = features['SK_ID_CURR']\n test_ids = test_features['SK_ID_CURR']\n\n # Extract the labels for training\n labels = features['TARGET']\n\n # Remove the ids and target\n features = features.drop(columns=['SK_ID_CURR', 'TARGET'])\n test_features = test_features.drop(columns=['SK_ID_CURR'])\n\n # One Hot Encoding\n if encoding == 'ohe':\n features = pd.get_dummies(features)\n test_features = pd.get_dummies(test_features)\n\n # Align the dataframes by the columns\n features, test_features = features.align(test_features, join='inner', axis=1)\n\n # No categorical indices to record\n cat_indices = 'auto'\n\n # Integer label encoding\n elif encoding == 'le':\n\n # Create a label encoder\n label_encoder = LabelEncoder()\n\n # List for storing categorical indices\n cat_indices = []\n\n # Iterate through each column\n for i, col in enumerate(features):\n if features[col].dtype == 'object':\n # Map the categorical features to integers\n features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1,)))\n test_features[col] = 
label_encoder.transform(np.array(test_features[col].astype(str)).reshape((-1,)))\n\n # Record the categorical indices\n cat_indices.append(i)\n\n # Catch error if label encoding scheme is not valid\n else:\n raise ValueError(\"Encoding must be either 'ohe' or 'le'\")\n\n print('Training Data Shape: ', features.shape)\n print('Testing Data Shape: ', test_features.shape)\n\n # Extract feature names\n feature_names = list(features.columns)\n\n # Convert to np arrays\n features = np.array(features)\n test_features = np.array(test_features)\n\n # Create the kfold object\n k_fold = KFold(n_splits=n_folds, shuffle=True, random_state=50)\n\n # Empty array for feature importances\n feature_importance_values = np.zeros(len(feature_names))\n\n # Empty array for test predictions\n test_predictions = np.zeros(test_features.shape[0])\n\n # Empty array for out of fold validation predictions\n out_of_fold = np.zeros(features.shape[0])\n\n # Lists for recording validation and training scores\n valid_scores = []\n train_scores = []\n\n # Iterate through each fold\n for train_indices, valid_indices in k_fold.split(features):\n # Training data for the fold\n train_features, train_labels = features[train_indices], labels[train_indices]\n # Validation data for the fold\n valid_features, valid_labels = features[valid_indices], labels[valid_indices]\n\n # Create the model\n model = lgb.LGBMClassifier(n_estimators=10000, objective='binary',\n class_weight='balanced', learning_rate=0.05,\n reg_alpha=0.1, reg_lambda=0.1,\n subsample=0.8, n_jobs=-1, random_state=50)\n\n # Train the model\n model.fit(train_features, train_labels, eval_metric='auc',\n eval_set=[(valid_features, valid_labels), (train_features, train_labels)],\n eval_names=['valid', 'train'], categorical_feature=cat_indices,\n early_stopping_rounds=100, verbose=200)\n\n # Record the best iteration\n best_iteration = model.best_iteration_\n\n # Record the feature importances\n feature_importance_values += model.feature_importances_ / k_fold.n_splits\n\n # Make predictions\n test_predictions += model.predict_proba(test_features, num_iteration=best_iteration)[:, 1] / k_fold.n_splits\n\n # Record the out of fold predictions\n out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration=best_iteration)[:, 1]\n\n # Record the best score\n valid_score = model.best_score_['valid']['auc']\n train_score = model.best_score_['train']['auc']\n\n valid_scores.append(valid_score)\n train_scores.append(train_score)\n\n # Clean up memory\n gc.enable()\n del model, train_features, valid_features\n gc.collect()\n\n # Make the submission dataframe\n submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': test_predictions})\n\n # Make the feature importance dataframe\n feature_importances = pd.DataFrame({'feature': feature_names, 'importance': feature_importance_values})\n\n # Overall validation score\n valid_auc = roc_auc_score(labels, out_of_fold)\n\n # Add the overall scores to the metrics\n valid_scores.append(valid_auc)\n train_scores.append(np.mean(train_scores))\n\n # Needed for creating dataframe of validation scores\n fold_names = list(range(n_folds))\n fold_names.append('overall')\n\n # Dataframe of validation scores\n metrics = pd.DataFrame({'fold': fold_names,\n 'train': train_scores,\n 'valid': valid_scores})\n\n return submission, feature_importances, metrics", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test 
= model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for SVM\n from sklearn.svm import SVC\n\n ### create classifier specifying the kernel\n clf = SVC(kernel=\"rbf\", C = 10000)\n\n ### these lines effectively slice the training dataset down \n ### to 1% of its original size, tossing out 99% of the training data.\n #features_train = features_train[:len(features_train)/100] \n #labels_train = labels_train[:len(labels_train)/100]\n\n ### Calculate the Time spent to train our algorithm\n t0 = time()\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n print \"Training time:\", round(time()-t0, 3), \"s\"\n\n ### Calculate the Time spent in the prediction\n t0 = time()\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)\n\n print \"Prediction time:\", round(time()-t0, 3), \"s\"\n\n print \"Prediction for element #10:\", pred[10]\n print \"Prediction for element #26:\", pred[26]\n print \"Prediction for element #50:\", pred[50]\n print \"We could predict \", (sum(i == 1 for i in pred)),\"in \", len(features_test),\"test events bilong to Chris\"\n\n ### calculate and return the accuracy on the test data\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred, labels_test)\n \n ### Another way\n ### accuracy = clf.score(features_test, labels_test)\n return accuracy", "def predictive_model (train_x, train_y, test_x, model_name):\n \n assert model_name in ['logisticregression', 'nn', 'randomforest',\n 'gaussiannb', 'bernoullinb', 'multinb',\n 'svmlin', 'gbm', 'extra trees',\n 'lda','passive aggressive', 'adaboost',\n 'bagging', 'xgb']\n \n # Define model\n if model_name == 'logisticregression':\n model = LogisticRegression()\n elif model_name == 'nn': \n model = MLPClassifier(hidden_layer_sizes=(200,200))\n elif model_name == 'randomforest': \n model = RandomForestClassifier()\n elif model_name == 'gaussiannb': \n model = GaussianNB()\n elif model_name == 'bernoullinb': \n model = BernoulliNB()\n elif model_name == 'multinb': \n model = MultinomialNB()\n elif model_name == 'svmlin': \n model = svm.LinearSVC()\n elif model_name == 'gbm': \n model = GradientBoostingClassifier() \n elif model_name == 'extra trees':\n model = ExtraTreesClassifier(n_estimators=20)\n elif model_name == 'lda':\n model = LinearDiscriminantAnalysis() \n elif model_name == 'passive aggressive':\n model = PassiveAggressiveClassifier()\n elif model_name == 'adaboost':\n model = AdaBoostClassifier()\n elif model_name == 'bagging':\n model = BaggingClassifier()\n elif model_name == 'xgb':\n model = XGBRegressor() \n \n # Train & Predict\n if model_name in ['svmlin', 'Passive Aggressive']: \n model.fit(train_x, train_y)\n test_y_hat = model.decision_function(test_x)\n \n elif model_name == 'xgb':\n model.fit(np.asarray(train_x), train_y)\n test_y_hat = model.predict(np.asarray(test_x))\n \n else:\n model.fit(train_x, train_y)\n test_y_hat = model.predict_proba(test_x)[:,1]\n \n return model, test_y_hat", "def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)", "def train_models(self, clf, silent, feature_names=None, target_names=None, live=False):\n X_train, X_test, y_train, y_test = self.X_train, self.X_test, self.y_train, self.y_test\n t0 = 
time()\n clf.fit(X_train, y_train)\n train_time = time() - t0\n pred = clf.predict(X_test)\n test_time = time() - t0\n accuracy = metrics.accuracy_score(y_test, pred)\n fbeta = metrics.fbeta_score(y_test, pred,1,labels=self.dataset['label'].unique(),average='weighted')\n name = clf.name[0]\n if False:\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(score_stats)\n\n if self.best_score_ledger[name][0] < accuracy:\n last = self.best_score_ledger[name][0]\n print(name)\n self.best_score_ledger[name] = [accuracy,fbeta]\n score_stats = f'Model : {name} | Score : {accuracy} | F-beta : {fbeta}'\n print(self.stemmer, ' ', self.transform)\n print(score_stats)\n\n if accuracy > self.best_models[name] and last != 0.0 and self.tuning_depth in ['normal','maximal']:\n new_model,score = self.hyperparameter_tuning(name,clf)\n if score > accuracy:\n self.best_score_ledger[name][0] = score\n clf = new_model\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n\n\n\n if not silent:\n if hasattr(clf, 'coef_'):\n print(\"dimensionality: %d\" % clf.coef_.shape[1])\n print(\"density: %f\" % density(clf.coef_))\n\n if True and feature_names is not None:\n print(\"top 10 keywords per class:\")\n for i, label in enumerate(target_names):\n top10 = np.argsort(clf.coef_[i])[-10:]\n print(trim(\"%s: %s\" % (label, \" \".join(feature_names[top10]))))\n print()\n\n if True:\n print(\"classification report:\")\n print(metrics.classification_report(y_test, pred,\n target_names=target_names))\n\n if True:\n print(\"confusion matrix:\")\n print(metrics.confusion_matrix(y_test, pred))\n # if no model exists for the current settings, create one by default. Prevents issues if models are deleted.\n elif not os.path.exists(\n os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}')):\n dump(clf, os.path.join(os.getcwd(), self.file_term, 'models', f'{\"_\".join([self.uid_base, name])}'))\n clf_descr = str(clf).split('(')[0]\n return clf_descr, accuracy, train_time, test_time", "def predict(self, model, x_test):\n pass", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred=model.predict(X_test)\n acc=[]\n for i,c in enumerate(Y_test.columns):\n print(c)\n print(classification_report(Y_test[c], Y_pred[:,i]))\n acc.append(accuracy_score(Y_test[c], Y_pred[:,i]))\n print('Accuracy :',np.mean(acc))\n\n pass", "def train_classifiers(params):\n # Create result dataframe\n out = pd.DataFrame(\n columns=[\"Dataset\", \"Classifier\", \"Accuracy\", \"F1\", \"Precision\", \"Recall\"])\n\n for model_type, all_languages in params.items():\n print(\"Classifier: \", str(model_type))\n\n for language, all_targets in all_languages.items():\n print(language)\n for target, model_params in all_targets.items():\n print(target)\n print(model_params)\n\n datasets = sample_datasets(\n language, target, SAMPLING, TFIDF, model_params['top_k_words'], SUB_SAMPLE_RERUNS)\n\n # Iterate the datasets\n for data_id, dataset in enumerate(datasets):\n dataset_name = dataset[0]\n data = dataset[1]\n y = np.array(dataset[2])\n val_data = dataset[3]\n val_y = np.array(dataset[4])\n\n acc_scores = []\n pre_scores = []\n rec_scores = []\n f1_scores = []\n \n global X_train\n X_train, X_test = data, val_data\n y_train, y_test = y, val_y\n y_pred = None\n\n # Create model instance.\n model = mlp_model(layers=model_params['hidden_layers'], units=model_params['hidden_units'], dropout_rate=model_params['dropout_rate'],\n 
input_shape=X_train.shape[1:], num_classes=2)\n optimizer = tf.keras.optimizers.Adam(\n lr=model_params['learning_rate'])\n model.compile(optimizer=optimizer,\n loss='binary_crossentropy', metrics=['acc'])\n\n # Stop training is validation loss doesnt decrease for 3 steps\n callbacks = [tf.keras.callbacks.EarlyStopping(\n monitor='val_loss', patience=3)]\n\n # Train and validate model.\n history = model.fit(\n X_train,\n y_train,\n epochs=model_params['epochs'],\n callbacks=callbacks,\n validation_data=(X_test, y_test),\n verbose=0,\n batch_size=512)\n\n acc_scores.append(\n history.history['val_acc'][-1])\n y_pred = [round(a[0])\n for a in model.predict(X_test)]\n\n # Compute the results\n prfs = precision_recall_fscore_support(\n y_test, y_pred, warn_for=[])\n\n pre_scores.append(prfs[0].mean())\n rec_scores.append(prfs[1].mean())\n f1_scores.append(prfs[2].mean())\n\n # Append average scores\n clf_acc = np.array(acc_scores).mean()\n clf_pre = np.array(pre_scores).mean()\n clf_rec = np.array(rec_scores).mean()\n clf_f1 = np.array(f1_scores).mean()\n\n out = out.append(pd.DataFrame(\n [[dataset_name, model_type, clf_acc, clf_f1, clf_pre, clf_rec]], columns=out.columns), ignore_index=True)\n\n return out", "def make_predictions(features, targets, loss, regularization):\n from your_code import GradientDescent\n\n np.random.seed(0)\n learner = GradientDescent(loss=loss, regularization=regularization,\n learning_rate=0.01, reg_param=0.05)\n learner.fit(features, targets, batch_size=None, max_iter=1000)\n\n print(\"actual targets: \", targets)\n return learner.predict(features)", "def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")", "def train(models, X_train, y_train, X_test, y_test):\n \n # Train and test each model in a for lop\n accuracies = []\n \n for model in models:\n clf = model.fit(X_train, y_train) # Train\n score = clf.score(X_test, y_test) # Test\n accuracies.append(score)\n\n return accuracies", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict(self, X):\n ...", "def predict_all(model_file, input_file):\n # Reading a model file\n w = {}\n for line in open(model_file):\n line = line.strip()\n (name, value) = line.split(\"\\t\")\n value = float(value)\n w[name] = value\n\n # Evaluation and print results\n for line in open(input_file):\n line = line.strip()\n phi = create_features(line)\n y_ = predict_one(w, phi)\n\n print y_", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with 
a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def model_fn(features, labels, mode, params):\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n label_ids = features[\"label_ids\"]\n\n is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n\n # TRAIN and EVAL\n if not is_predicting:\n\n (loss, predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n train_op = bert.optimization.create_optimizer(\n loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n\n # Calculate evaluation metrics.\n def metric_fn(label_ids, predicted_labels):\n accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n f1_score = tf.contrib.metrics.f1_score(\n label_ids,\n predicted_labels)\n auc = tf.metrics.auc(\n label_ids,\n predicted_labels)\n recall = tf.metrics.recall(\n label_ids,\n predicted_labels)\n precision = tf.metrics.precision(\n label_ids,\n predicted_labels)\n true_pos = tf.metrics.true_positives(\n label_ids,\n predicted_labels)\n true_neg = tf.metrics.true_negatives(\n label_ids,\n predicted_labels)\n false_pos = tf.metrics.false_positives(\n label_ids,\n predicted_labels)\n false_neg = tf.metrics.false_negatives(\n label_ids,\n predicted_labels)\n\n return {\n \"eval_accuracy\": accuracy,\n \"f1_score\": f1_score,\n \"auc\": auc,\n \"precision\": precision,\n \"recall\": recall,\n \"true_positives\": true_pos,\n \"true_negatives\": true_neg,\n \"false_positives\": false_pos,\n \"false_negatives\": false_neg\n }\n\n eval_metrics = metric_fn(label_ids, predicted_labels)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n train_op=train_op)\n else:\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss,\n eval_metric_ops=eval_metrics)\n else:\n (predicted_labels, log_probs) = ClassifierModel.create_model(\n is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n\n predictions = {\n 'probabilities': log_probs,\n 'labels': predicted_labels\n }\n return tf.estimator.EstimatorSpec(mode, predictions=predictions)", "def predict_ensemble(test_df):\n instances = [wongnai_predictor._dataset_reader.text_to_instance(word_tokenize(review)) \n for review in list(test_df.review)]\n model_paths = glob('output_*/model.tar.gz')\n all_predicted_labels = []\n for model_path in model_paths:\n archive = load_archive(model_path) # load trained model\n wongnai_predictor = Predictor.from_archive(archive, 'wongnai_predictor')\n predicted_labels = [int(wongnai_predictor.predict_instance(instance)['predicted_label']) \n for instance in instances]\n all_predicted_labels.append(predicted_labels)\n all_predicted_labels = np.array(all_predicted_labels)\n predicted_labels_vote = mode(np.array(all_predicted_labels).T, axis=-1).mode.ravel()\n test_df['rating'] = predicted_labels_vote\n return test_df.drop('review', axis=1)", "def 
evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def predict(self, features: pd.DataFrame, return_prob: bool=False):\n\n # Load a model.\n if self.net is None:\n self.load()\n\n # Copy features.\n features_in = features.copy()\n\n # Apply log10 for some features.\n # TODO: fine this code.\n features_in['period'], _ = \\\n apply_log10(features_in['period'],\n self.min_values['min_period'])\n features_in['amplitude'], _ = \\\n apply_log10(features_in['amplitude'],\n self.min_values['min_amplitude'])\n features_in['hl_amp_ratio'], _ = \\\n apply_log10(features_in['hl_amp_ratio'],\n self.min_values['min_hl_amp_ratio'])\n features_in['kurtosis'], _ = \\\n apply_log10(features_in['kurtosis'],\n self.min_values['min_kurtosis'])\n features_in['phase_cusum'], _ = \\\n apply_log10(features_in['phase_cusum'],\n self.min_values['min_phase_cusum'])\n features_in['phase_eta'], _ = \\\n apply_log10(features_in['phase_eta'],\n self.min_values['min_phase_eta'])\n features_in['quartile31'], _ = \\\n apply_log10(features_in['quartile31'],\n self.min_values['min_quartile31'])\n features_in['skewness'], _ = \\\n apply_log10(features_in['skewness'],\n self.min_values['min_skewness'])\n features_in['slope_per90'], _ = \\\n apply_log10(features_in['slope_per90'],\n self.min_values['min_slope_per90'])\n features_in = np.array(features_in)\n\n # original.\n features_norm = (features_in - self.norm_params[0]) / \\\n self.norm_params[1]\n\n # new.\n # features_norm = features_in - self.norm_params[0]\n # features_norm /= self.norm_params[1]\n\n # Build a dataset with dummy labels.\n labels = np.random.randn(len(features_norm))\n data_set = LightCurveDataset(features_norm, labels)\n\n # Build data loaders. Do NOT shuffle to keep the order.\n data_loader = torch.utils.data.DataLoader(\n data_set, batch_size=100, shuffle=False, num_workers=2,\n drop_last=False\n )\n\n predicted_value = []\n predicted_label = []\n predicted_prob = []\n sm = nn.Softmax(dim=1)\n\n self.net.eval()\n for i, test_data in enumerate(data_loader, 0):\n test_inputs, _ = test_data\n test_inputs = test_inputs.to(self.device)\n\n outputs = self.net(test_inputs)\n outputs_max = torch.max(outputs, 1)\n outputs_value = outputs_max[0].detach().cpu().numpy()\n outputs_label = outputs_max[1].detach().cpu().numpy()\n predicted_value += outputs_value.tolist()\n predicted_label += outputs_label.tolist()\n\n if return_prob:\n predicted_prob += sm(outputs).detach().cpu().numpy().tolist()\n\n # Inverse transform the label (i.e. 
into string label).\n predicted_label = self.label_encoder.inverse_transform(predicted_label)\n\n if return_prob:\n return predicted_label, predicted_prob\n else:\n return predicted_label", "def learn1_svc():\n \n svc.fit(vector_training,sentiment_training) ##fit the training data of vector tweets and sentiments using LinearSVC\n correct = 0\n for i in range(vector_testing.shape[0]): ##using the testing data, see how accurate LinearSVC is\n prediction = svc.predict(vector_testing[i])\n sentiment = sentiment_testing[i]\n if prediction[0] == sentiment:\n correct +=1\n accuracy = correct/vector_testing.shape[0]\n print('Linear Support Vector Classifier Testing Accuracy: {:.2f}'.format(accuracy)) ##print the accuracy of the algorithm", "def pick_model(self):\n self.x = self.train[self.use_columns]\n try:\n self.x = pd.get_dummies(self.x)\n except:\n pass # if no categorical features\n self.final_columns = self.x.columns\n print(self.x.columns)\n self.scaler = StandardScaler()\n self.x = self.scaler.fit_transform(self.x)\n self.y = self.train['y']\n\n if len(np.unique(self.y))<50:\n print('Consider using classification, probably not continuos target variable!')\n\n # for picking the best model\n lr = Ridge(max_iter=1500)\n rf = RandomForestRegressor(n_estimators=500, max_depth=20, min_samples_leaf=3,\n max_features='auto', n_jobs=-1)\n svr = SVR(max_iter=-1)\n\n self.models = {'lr': lr, 'rf': rf, 'svr': svr}\n self.scores = {'lr': [], 'rf': [], 'svr': []}\n print('selecting model')\n for i, (train_index, test_index) in enumerate(self.kf.split(self.x, self.y)):\n x_tr, x_val = self.x[train_index], self.x[test_index]\n y_tr, y_val = self.y[train_index], self.y[test_index]\n if len(x_tr)>10000:\n print('reduced train size')\n y_tr.index, y_val.index = range(len(y_tr)), range(len(y_val))\n mask_train = np.random.choice(range(len(x_tr)),size=10000)\n x_tr, y_tr = x_tr[mask_train], y_tr[mask_train]\n for k, model in self.models.items():\n print('fold: ', i+1)\n print('model: ', k)\n model = clone(self.models[k])\n model.fit(x_tr, y_tr)\n p = model.predict(x_val)\n # score = mean_squared_error(y_val, p)\n score = mean_absolute_error(y_val, p)\n self.scores[k] = self.scores[k] + [score]\n\n self.best_score = 9e10\n self.old_score = 9e10\n self.best_model = ''\n self.old_model = ''\n for k, l in self.scores.items():\n mean = np.mean(l)\n if mean < self.best_score:\n self.old_score = self.best_score\n self.old_model = self.best_model\n self.best_score = mean\n self.best_model = k\n print(self.best_model, self.best_score)", "def predict(self, features):\n out_l = []\n one_features = np.concatenate((np.ones(features.shape[0])[:, np.newaxis], features), axis=1)\n for example in one_features:\n this_pred = 1 if self.w.dot(example) >= 0 else -1\n out_l.append(this_pred)\n self.out = out_l\n return np.array(out_l)", "def predict(self) :\n y_pred = np.dot(self.W.T,self.X_test) + self.b \n if self.thr!=-1 :\n y_pred[y_pred <= self.thr] = -1\n y_pred[y_pred > self.thr] = 1\n y_pred = y_pred.astype(\"int\")\n corr = 0\n for i in range(y_pred.shape[1]) :\n if y_pred[:,i]==self.y_test[:,i] :\n corr += 1\n accu = (corr / y_pred.shape[1])*100\n print(\"ACCURACY : {}\".format(accu))\n else :\n rmse = np.sqrt(np.sum(np.square(self.y_test - y_pred)) / y_pred.shape[1])\n print(\"RMSE : {}\".format(rmse))", "def predict():\n if (not request.json):\n abort(400)\n \n product = {\n 'brand': request.json['brand'],\n 'category-1': request.json['category-1'],\n 'category-2': request.json['category-2'],\n 'category-3': 
request.json['category-3'],\n 'colour': request.json['colour'],\n 'fabric_type': request.json['fabric_type'],\n 'ftp_acrylic': request.json['ftp_acrylic'],\n 'ftp_cotton': request.json['ftp_cotton'],\n 'ftp_elastane': request.json['ftp_elastane'],\n 'ftp_linen': request.json['ftp_linen'],\n 'ftp_other': request.json['ftp_other'],\n 'ftp_polyamide': request.json['ftp_polyamide'],\n 'ftp_polyester': request.json['ftp_polyester'],\n 'ftp_polypropylene': request.json['ftp_polypropylene'],\n 'ftp_silk': request.json['ftp_silk'],\n 'ftp_viscose': request.json['ftp_viscose'],\n 'ftp_wool': request.json['ftp_wool'],\n 'gender': request.json['gender'],\n 'label': request.json['label'],\n 'made_in': request.json['made_in'],\n 'season': request.json['season'],\n 'size': request.json['size'],\n 'unspsc_code': request.json['unspsc_code'],\n 'weight': request.json['weight'],\n 'ML-model': request.json['ML-model']\n }\n\n product['co2_total'] = None\n ml_model = product.pop('ML-model', None)\n if (ml_model == None or ml_model == ''):\n print('Loading default model: LGBM')\n ml_model = 'lgbm_default'\n else:\n print(f'Loading model: {ml_model}')\n model = load_model(ml_model)\n print('Model loaded')\n \n pred_with_intervals = do_prediction_with_params(model, product, intervals=True)\n \n prediction = pred_with_intervals[0][0]\n percentile_5 = pred_with_intervals[0][1] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][1] is not None else None\n percentile_95 = pred_with_intervals[0][2] if len(pred_with_intervals[0]) == 3 and pred_with_intervals[0][2] is not None else None\n result = {\n \"prediction\": prediction,\n \"5-percentile\": percentile_5,\n \"95-percentile\": percentile_95\n }\n \n print('CO2e prediction complete, returning result')\n print(result)\n \n resp = jsonify(result)\n resp.status_code = 201\n return resp", "def predict_proba(self):\n ...", "def model_accuracy(model, X, y):\n acc = None\n ### YOUR CODE HERE 1-2 lines\n predictions = model.predict(X)\n acc = np.mean([1 if predict == y[target] else 0 for target, predict in enumerate(predictions)])\n ### END CODE\n return acc", "def evaluate_model(model,test_inputs,test_labels,model_mode):\n\n if model_mode == \"classification\":\n y_pred = model.predict(test_inputs)\n print(\"Accuracy score: \", accuracy_score(test_labels, y_pred))\n #print(\"F1 score: \", f1_score(test_labels,y_pred, average='weighted'))\n\n conf_mx = confusion_matrix(test_labels, y_pred)\n #print(conf_mx)\n plt.matshow(conf_mx, cmap = plt.cm.jet)\n plt.show()\n\n if model_mode == \"regression\":\n y_pred = model.predict(test_inputs)\n print(\"Mean absolute error: \", mean_absolute_error(test_labels, y_pred))", "def score_features(self, features, predictor, cv_fold, verbose=0):\n # First we optimise the hyper parameters:\n # data has 4 keys but only 2 (x_train and y_train) will be used for the optimization\n best_params = optimize_hyper_parameters(features, predictor, cv_fold, verbose)\n predictor.set_hyper_parameters(best_params)\n\n # Then we fit the predictor:\n predictor.fit(features)\n\n # Afterwards, we generate the prediction\n y_pred = predictor.predict(features)\n\n # Finally, we compute the metrics:\n metric_res = score_prediction(features['y_test'], y_pred)\n\n self.predictor = predictor\n\n return metric_res, best_params", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = 
(output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def learn(self, Xtrain, ytrain):", "def _fit_predict(X_train, y_train, X_test):\n raise NotImplementedError()", "def make_predictions(df):\n t_labels = get_labels(\"labels_pca\")\n # clean data\n df = clean_data(df)\n # engineer data\n df = engineer_features(df)\n # predict\n with open(\"model.pkl\",\"r\") as mdl:\n model = pickle.load(mdl)\n mdl.close()\n predictions = model.predict(df[t_labels])\n return predictions", "def trainRegressionModel(X,y):\n # # instantiate a logistic regression model, and fit with X and y\n # model = LogisticRegression()\n # model = model.fit(X, y)\n # # check the accuracy on the training set\n # print(model.score(X, y))\n #X['intercept'] = 1.0\n #del X['isCapitalized']\n #del X['isNN']\n #del X['isNNP']\n #del X['isJJ']\n #del X['isUpper']\n #del X['isPrecedingIN']\n logit = sm.Logit(y, X)\n result = logit.fit()\n print(result.summary())\n print(result.conf_int())\n model = LogisticRegression()\n model = model.fit(X, y)\n print(model.score(X, y))\n print(y.mean())\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n # predict class labels for the test set\n predicted = model.predict(X_test)\n print(predicted)\n for i in predicted:\n if i==1:\n print(\"Test:\"+str(i))\n print(max(predicted))\n #generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n # generate evaluation metrics\n print(\"Accuracy: \"+str(metrics.accuracy_score(y_test, predicted)))\n print(\"AUC: \"+str(metrics.roc_auc_score(y_test, probs[:, 1])))\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n\n from sklearn.cross_validation import cross_val_score\n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n 
parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def train_model(self):\n self.best_epoch = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n self.best_f1 = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n for t in self.topic:\n if t != 'other':\n for st in self.topic2sub_topic[t].keys():\n\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(128 * \"=\")\n print(\"Input: str; Output: boolean(if the str contents the intent: \", st, \" ).\")\n print(64 * \"-\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n best_f1 = 0\n for e in range(1,10):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n model.fit(X_train, 
y_train, epochs=e, batch_size=128)\n print(\"f1_score on dev set: \")\n f1 = self.f1_score_model(model, X_dev, y_dev)[0]\n if f1 > best_f1:\n self.model_zoo[t][st] = model\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n self.best_epoch[t][st] = e\n self.best_f1[t][st] = f1\n best_f1 = f1\n\n print(64*\"=\")\n print()", "def train_model(x_tra, y_tra):\n\n clf1 = AdaBoostClassifier(n_estimators=300, random_state=1)\n clf1.fit(x_tra, y_tra)\n return clf1", "def testmodel(label_name, category_features, non_category_features, model):\n # Reading the train/test data as data frames.\n train_df, test_df = read_data()\n\n # Preprocess the data frame.\n new_df = preprocess(train_df, label_name, category_features, non_category_features)\n\n # Names of final columns\n final_columns = new_df.columns\n\n # Find averages for non-category features.\n averages = {}\n for col in non_category_features:\n averages[col] = new_df[col].mean()\n\n # Dropping na's\n new_df = new_df.dropna()\n\n # getting X & y for the train set\n y = np.array(new_df.fraud)\n X = new_df.drop(['fraud'], axis=1).values\n\n # Fit the model\n model = model.fit(X, y)\n\n return (model, final_columns, averages)", "def do_score_prediction(self):\n \n import iread.myio as mio\n from igui.score_canvas import ScoreCanvas\n exp_name = 'JPS_act_12_exp_4_accv_half_fc_j2'\n exp_name_base = 'ASM_act_12_exp_4'\n exp_base_folder = '/opt/visal/tmp/for_sijin/Data/H36M/H36MExp'\n exp_path = iu.fullfile(exp_base_folder, 'folder_%s' % exp_name, 'batches.meta')\n meta_base_path = iu.fullfile(exp_base_folder, 'folder_%s' % exp_name_base, 'batches.meta')\n meta = mio.unpickle(exp_path)\n meta_base = mio.unpickle(meta_base_path)\n images_path = meta_base['images_path']\n \n pred_pose = meta['feature_list'][0]\n gt_pose = meta['random_feature_list'][0]\n ntotal = gt_pose.shape[-1]\n print 'gt_pose_shape',gt_pose.shape\n print 'pred_pose_shape', pred_pose.shape\n ref_frame = 7600 # This is the index in test range\n ## ref_frame = 2600 # This is the index in test range\n test_range = self.test_data_provider.feature_range\n ref_idx = test_range[ref_frame]\n \n n_to_show = 1000\n \n idx_to_show = np.random.choice(ntotal, n_to_show - 1)\n idx_to_show = [ref_idx] + idx_to_show.tolist() \n idx_to_show = np.asarray(idx_to_show, dtype=np.int).flatten()\n \n ref_pose = pred_pose[...,ref_idx].reshape((-1,1),order='F') \n pose_to_eval =gt_pose[...,idx_to_show]\n output_feature_name = 'fc_2' # <------------------Parameter\n output_layer_idx = self.get_layer_idx(output_feature_name)\n\n # do it once <------------- Maybe it can support multiple batch in the future\n data_dim = self.model_state['layers'][output_layer_idx]['outputs']\n print 'data_dim', data_dim\n \n cur_data = [np.require(np.tile(ref_pose, [1,n_to_show]), \\\n dtype=np.single,requirements='C'), \\\n np.require(pose_to_eval.reshape((-1,n_to_show),order='F'),\\\n dtype=np.single,requirements='C'), \\\n np.require(np.zeros((1,n_to_show),dtype=np.single), \\\n requirements='C'),\n np.require(np.zeros((n_to_show,data_dim),dtype=np.single), \\\n requirements='C')]\n residuals = cur_data[1][...,0].reshape((-1,1),order='F') - cur_data[1]\n dp = self.test_data_provider\n mpjpe = dutils.calc_mpjpe_from_residual(residuals, dp.num_joints)\n\n gt_score = dp.calc_score(mpjpe, dp.mpjpe_factor/dp.max_depth,\\\n dp.mpjpe_offset/dp.max_depth).reshape((1,n_to_show)).flatten()\n self.libmodel.startFeatureWriter(cur_data, output_layer_idx)\n self.finish_batch()\n score = cur_data[-1].T\n print 'dim score', 
score.shape, 'dim gt_score', gt_score.shape\n score = score.flatten()\n # score = gt_score.flatten()\n def my_sort_f(k):\n if k == 0:\n return 10000000\n else:\n return score[k]\n sorted_idx = sorted(range(n_to_show), key=my_sort_f,reverse=True)\n s_to_show = [idx_to_show[k] for k in sorted_idx]\n sorted_score = np.asarray( [score[k] for k in sorted_idx])\n \n pose_to_plot = self.convert_relskel2rel(cur_data[1])\n sorted_pose = pose_to_plot[...,sorted_idx]\n class ScorePoseCanvas(ScoreCanvas):\n def __init__(self,data_dic):\n import iread.h36m_hmlpe as h36m\n ScoreCanvas.__init__(self,data_dic)\n self.pose_data = data_dic['pose_data']\n self.limbs = h36m.part_idx\n self.tmp = 0\n def show_image(self,ax):\n # ScoreCanvas.show_image(self,ax)\n # return\n import Image\n idx =self.cur_data_idx\n if idx == 0:\n self.tmp = self.tmp + 1\n if self.tmp == 1:\n img = self.load_image(idx)\n ax.imshow(np.asarray(img))\n return\n print 'Current data idx %d ' % self.cur_data_idx\n # params = {'elev':-89, 'azim':-107}\n # params = {'elev':-69, 'azim':-107}\n params = {'elev':-81, 'azim':-91} # frontal view\n fig = plt.figure(100)\n from mpl_toolkits.mplot3d import Axes3D\n import imgproc\n # new_ax = self.fig.add_axes( rng_rel,projection='polar')\n new_ax = fig.add_subplot(111,projection='3d')\n imgproc.turn_off_axis(new_ax)\n cur_pose = self.pose_data[...,idx].reshape((3,-1),order='F')\n dutils.show_3d_skeleton(cur_pose.T,\\\n self.limbs, params)\n xmin,xmax = np.min(cur_pose[0]),np.max(cur_pose[0])\n ymin,ymax = np.min(cur_pose[1]),np.max(cur_pose[1])\n zmin,zmax = np.min(cur_pose[2]),np.max(cur_pose[2])\n def extent(x,y,ratio):\n x = x + (x-y) * ratio\n y = y + (y-x) * ratio\n return -0.4,0.4\n r = 0.1\n new_ax.set_xlim(extent(xmin,xmax,r))\n new_ax.set_ylim(extent(ymin,ymax,r))\n new_ax.set_ylim(extent(zmin,zmax,r))\n tmp_folder = '/public/sijinli2/ibuffer/2014-CVPR2015/tmp/images'\n save_path = iu.fullfile(tmp_folder, 'tmp_image.png')\n plt.savefig(save_path)\n img = Image.open(save_path)\n plt.close(100)\n img_arr = np.asarray(img)\n s = np.int(img_arr.shape[0]/5.0)\n e = np.int(img_arr.shape[0] - s)\n s = 0\n e = img_arr.shape[0]\n img_arr = img_arr[s:e,:,:]\n ax.imshow(np.asarray(img_arr))\n # ax.plot([1,0,0],[0,1,0],[0,0,1])\n\n\n sc = ScorePoseCanvas({'x': np.asarray(range(len(idx_to_show))), 'y':sorted_score,\\\n 'images_path': [images_path[k] for k in s_to_show], \\\n 'pose_data':sorted_pose})\n sc.start()\n print 'max score is ' , sorted_score.max()\n gt_sort_idx = sorted(range(n_to_show), key=lambda k:gt_score[k], reverse=True)\n sorted_gt_score = np.asarray([gt_score[k] for k in gt_sort_idx])\n sorted_score_by_gt = [score[k] for k in gt_sort_idx]\n pl.plot(np.asarray(range(n_to_show)), sorted_gt_score, 'r', label='gt_score')\n pl.plot(np.asarray(range(n_to_show)), sorted_score_by_gt, 'g', label='pred_score')", "def Model_Train(datasample):\r\n datasample=df_to_array(datasample)\r\n train_features=datasample[:,:-1]\r\n train_labels=datasample[:,-1]\r\n rf = RandomForestRegressor(n_estimators= n_trees)\r\n rf.fit(train_features,train_labels)\r\n return rf", "def accuracy(self):", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def model(df,x,y):\n clf = 
RandomForestClassifier()\n clf.fit(x, y)\n \n prediction = int(clf.predict(df))\n #prediction_proba = clf.predict_proba(df)\n columns = [\"Iris setosa\", \"Iris versicolor\", \"Iris virginica\"]\n prediction_proba = pd.DataFrame(clf.predict_proba(df), columns=columns)\n \n\n dict_pred = {0 : \"Iris setosa\", \n 1 : \"Iris versicolor\", \n 2 : \"Iris virginica\"}\n\n pred = dict_pred[prediction]\n\n pred_proba = prediction_proba\n return pred, pred_proba", "def train_and_test_model(In_train, Out_train, In_test, Out_test):\n\n # Naive Bayes Classifier\n print(\"Naive Bayes\")\n NB_classifier = MultinomialNB()\n NB_classifier.fit(In_train, Out_train)\n predictions = NB_classifier.predict(In_test)\n print(NB_classifier.score(In_test, Out_test))\n NB_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(NB_Confusion_Matrix)\n plot_confusion_matrix(NB_Confusion_Matrix)\n print()\n\n # Stochastic Gradient Descent Classifier\n print(\"Stochastic Gradient Descent\")\n SGD_classifier = SGDClassifier()\n SGD_classifier.fit(In_train, Out_train)\n predictions = SGD_classifier.predict(In_test)\n print(SGD_classifier.score(In_test, Out_test))\n SGD_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SGD_Confusion_Matrix)\n plot_confusion_matrix(SGD_Confusion_Matrix)\n print()\n\n # MultiLayer Perceptron Classifier\n print(\"MultiLayer Perceptron\")\n MLP_classifier = MLPClassifier()\n MLP_classifier.fit(In_train, Out_train)\n predictions = MLP_classifier.predict(In_test)\n print(MLP_classifier.score(In_test, Out_test))\n MLP_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(MLP_Confusion_Matrix)\n plot_confusion_matrix(MLP_Confusion_Matrix)\n print()\n\n # Random Forest Classifier\n print(\"Random Forest Classifier\")\n RF_classifier = RandomForestClassifier()\n RF_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n scores = cross_val_score(RF_classifier, In_test, Out_test)\n print(scores.mean())\n RF_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(RF_Confusion_Matrix)\n plot_confusion_matrix(RF_Confusion_Matrix)\n print()\n\n # Decision Tree Classifier\n print(\"Decision Tree\")\n DT_classifier = tree.DecisionTreeClassifier()\n DT_classifier.fit(In_train, Out_train)\n predictions = RF_classifier.predict(In_test)\n print(DT_classifier.score(In_test, Out_test))\n DT_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(DT_Confusion_Matrix)\n plot_confusion_matrix(DT_Confusion_Matrix)\n print()\n\n # K-Nearest Neighbors Classifier\n print(\"K-NN\")\n KNN_Classifier = KNeighborsClassifier()\n KNN_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(KNN_Classifier.score(In_test, Out_test))\n KNN_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(KNN_Confusion_Matrix)\n plot_confusion_matrix(KNN_Confusion_Matrix)\n print()\n\n # Support Vector Machines\n print(\"Support Vector Machines\")\n SVM_Classifier = svm.SVC()\n SVM_Classifier.fit(In_train, Out_train)\n predictions = KNN_Classifier.predict(In_test)\n print(SVM_Classifier.score(In_test, Out_test))\n SVM_Confusion_Matrix = confusion_matrix(Out_test, predictions)\n print(SVM_Confusion_Matrix)\n plot_confusion_matrix(SVM_Confusion_Matrix)\n print()\n\n return NB_classifier", "def _predict_scores_fixed(self, X, **kwargs):\n raise NotImplementedError", "def predict_from(self, inputs, to_layers):", "def classification(features, scores, n_classes, model_type=0, save_path='results/',\n lr=.01, 
batch_size=10, n_epochs=20, test_size=.3,\n verbose=False, save_results=False, normalize=True):\n # features, scores = read_data_from_csv()\n verbose_opc = 0\n if verbose:\n print(\"[INFO] Shuffle Data\")\n verbose_opc = 1\n\n features, scores = shuffle(features, scores, random_state=0)\n\n if normalize:\n if verbose:\n print(\"[INFO] Normalizing Data\")\n scaler = StandardScaler()\n scaler.fit(features)\n features = scaler.transform(features)\n\n if verbose:\n print(\"[INFO] Splitting data into train and test sets\")\n x_train, x_test, y_train, y_test = train_test_split(features, scores, test_size=test_size)\n\n\n\n if verbose:\n print(\"[INFO] Creating the machine learning model\")\n\n model = None\n if model_type == 0:\n model = res_model(x_train.shape[1:], n_classes)\n elif model_type == 1:\n model = simple_model(x_train.shape[1:], n_classes)\n elif model_type == 2:\n model = sklearn.svm.SVC(gamma='auto')\n elif model_type == 3:\n model = RandomForestClassifier()\n elif model_type == 4:\n model = AdaBoostClassifier()\n elif model_type == 5:\n model = xgb.XGBClassifier(objective=\"multi:softprob\", random_state=42)\n\n h = None\n if model_type >= 0 and model_type <= 1:\n # classes 0.0 ,0.5, 1.0, 1.5, 2.0\n y_cat_train = to_categorical(y_train, n_classes)\n y_cat_test = to_categorical(y_test, n_classes)\n\n model.compile(loss=\"logcosh\",\n #optimizer=keras.optimizers.SGD(lr=lr, momentum=.3),\n optimizer=\"adamax\",\n metrics=['accuracy'])\n\n h = model.fit(x_train, y_cat_train,\n batch_size=batch_size,\n epochs=n_epochs,\n validation_data=(x_test, y_cat_test),\n verbose=verbose_opc)\n\n evaluate_model(x_test, y_cat_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results, is_rna=True)\n else:\n model.fit(x_train, y_train)\n\n evaluate_model(x_test, y_test, batch_size, model, n_epochs, h, n_classes, folder_name=save_path,\n save_results=save_results)\n\n return model", "def predict_and_acc(model, test_data):\n test_data_f = format_data(test_data, 10)\n y_predicted = model.predict(test_data_f[0])\n predicted_classes = np.argmax(y_predicted, axis=1)\n true_classes = np.argmax(test_data_f[1], axis=1)\n acc = metrics.accuracy(predicted_classes, true_classes)\n return acc, predicted_classes, y_predicted", "def naive_forecasting(x_test, y_test):\n y_pred = np.concatenate([x_test[:,-1].reshape((TESTING_BATCH_SIZE,1,N_INPUT_FEATURES)) for x in range(N_PREDICTIONS)],axis=1)\n y_pred = y_pred[:,:,:N_OUTPUT_FEATURES]\n return np.mean(keras.losses.mean_squared_error(y_test,y_pred)), y_pred", "def test(self, dataset): \n predictions = np.zeros(len(dataset), int)\n \n accuracy = self.random_forest.score(dataset[:,:-1], dataset[:,-1]) # Predict and compute accuracy.\n predictions = self.predict(dataset[:,:-1]) # Predict and return list of predictions.\n \n return predictions, accuracy", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def train(self, features, labels):\n self._clf.fit(features, labels)", "def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n 
pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)", "def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):" ]
[ "0.69236994", "0.6804752", "0.66647077", "0.6594891", "0.6577277", "0.6555848", "0.6513031", "0.65010685", "0.64928365", "0.6475141", "0.6472414", "0.64383626", "0.6421993", "0.6405277", "0.6402431", "0.63951135", "0.63951135", "0.638529", "0.63590425", "0.63344693", "0.6332913", "0.632656", "0.6322379", "0.63163084", "0.63130146", "0.6310513", "0.6304223", "0.62865615", "0.6284346", "0.62749046", "0.6273323", "0.62686884", "0.6265518", "0.62627494", "0.62558705", "0.6255267", "0.6252295", "0.6251279", "0.62428814", "0.62406427", "0.62405944", "0.6234917", "0.62346977", "0.6230593", "0.62283224", "0.6227546", "0.6226482", "0.62161046", "0.6211467", "0.6204356", "0.62005335", "0.61931014", "0.6189045", "0.6184911", "0.6183926", "0.6183926", "0.6183926", "0.6181176", "0.6180825", "0.61806434", "0.6175619", "0.61746114", "0.6170426", "0.6170002", "0.6162084", "0.61554384", "0.61552787", "0.61534184", "0.6150228", "0.6145541", "0.6145204", "0.6141459", "0.6139296", "0.6131766", "0.61309814", "0.61278117", "0.6126416", "0.61232823", "0.6113428", "0.61132824", "0.61120075", "0.6109258", "0.61072445", "0.61035675", "0.610021", "0.6099764", "0.6096013", "0.60924363", "0.6090234", "0.6090214", "0.6088262", "0.60866445", "0.6085668", "0.608523", "0.60848796", "0.6084781", "0.60847473", "0.60811394", "0.60811394", "0.6076899", "0.6076666" ]
0.0
-1
Initialize monitor with name and units.
def __init__(self, pvname: str, units: str, controller: Controller) -> None:
    self.units = units.split(":")
    self.pvname = pvname
    self.controller = controller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __initializeMonitor( self ):\n if self.__moduleProperties[ 'standalone' ]:\n self.monitor = gMonitor\n else:\n self.monitor = MonitoringClient()\n self.monitor.setComponentType( self.monitor.COMPONENT_AGENT )\n self.monitor.setComponentName( self.__moduleProperties[ 'fullName' ] )\n self.monitor.initialize()\n self.monitor.registerActivity( 'CPU', \"CPU Usage\", 'Framework', \"CPU,%\", self.monitor.OP_MEAN, 600 )\n self.monitor.registerActivity( 'MEM', \"Memory Usage\", 'Framework', 'Memory,MB', self.monitor.OP_MEAN, 600 )\n # Component monitor\n for field in ( 'version', 'DIRACVersion', 'description', 'platform' ):\n self.monitor.setComponentExtraParam( field, self.__codeProperties[ field ] )\n self.monitor.setComponentExtraParam( 'startTime', Time.dateTime() )\n self.monitor.setComponentExtraParam( 'cycles', 0 )\n self.monitor.disable()\n self.__monitorLastStatsUpdate = time.time()", "def initialize_monitoring(monitor_name: str = LOG_NAME,\n logging_level=logging.INFO) -> logging.Logger:\n\n root_dir = os.path.abspath(os.path.dirname(__file__))\n date_postfix = datetime.today().strftime('%Y-%m-%d')\n file_name = f\"{root_dir}/{MONITORING_PATH}/{monitor_name}_{date_postfix}.log\"\n\n stream_handler = logging.StreamHandler()\n\n file_handler = logging.FileHandler(filename=file_name, mode='w+')\n\n logging.basicConfig(format=\"%(asctime)s %(message)s\",\n datefmt=\"%m/%d/%Y %I:%M:%S %p\",\n level=logging_level,\n handlers=[stream_handler, file_handler])\n\n monitor = logging.getLogger(name=monitor_name)\n return monitor", "def initMonitor(self, task, job, logPath, args={}):\n print(\"In TestMonitor.initMonitor\")\n\n self.softTimeOut = args.get('softTimeOut', None)\n self.hardTimeOut = args.get('hardTimeOut', None)", "def configureMonitor(self, monName, *posArgs, **kwargs):\n monitorRef = self._ShREEKMonitors.get(monName, None)\n if monitorRef == None:\n msg = \"Tried to configure Non-existent monitor:\"\n msg += \"\\n%s\\n\" % monName\n msg += \"Existing Monitors:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n MissingMonitor = monName,\n ValidMonitors = self._ShREEKMonitors.keys())\n\n monitorRef.addPositionalArg(*posArgs)\n monitorRef.addKeywordArg(**kwargs)\n return", "async def _async_start_monitor(self) -> None:\n if not sys.platform.startswith(\"linux\"):\n return\n info = await system_info.async_get_system_info(self.hass)\n if info.get(\"docker\"):\n return\n\n from pyudev import ( # pylint: disable=import-outside-toplevel\n Context,\n Monitor,\n MonitorObserver,\n )\n\n try:\n context = Context()\n except (ImportError, OSError):\n return\n\n monitor = Monitor.from_netlink(context)\n try:\n monitor.filter_by(subsystem=\"tty\")\n except ValueError as ex: # this fails on WSL\n _LOGGER.debug(\n \"Unable to setup pyudev filtering; This is expected on WSL: %s\", ex\n )\n return\n observer = MonitorObserver(\n monitor, callback=self._device_discovered, name=\"usb-observer\"\n )\n observer.start()\n\n def _stop_observer(event: Event) -> None:\n observer.stop()\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_observer)\n self.observer_active = True", "def setMonitorParam(self, monName, *params):\n monitorRef = self._ShREEKMonitors.get(monName, None)\n if monitorRef == None:\n msg = \"Tried to configure Non-existent monitor:\"\n msg += \"\\n%s\\n\" % monName\n msg += \"Existing Monitors:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n MissingMonitor = 
monName,\n ValidMonitors = self._ShREEKMonitors.keys())\n monitorRef.addPositionalArg(*params)\n return", "def __init__(self, lunit=\"nm\"):\n super().__init__(lunit)", "def start(self):\n self.monitor_lc.start(self.interval)", "def test_monitor_creation(processor, measure, dialog_sleep):\n def run(measure):\n t = Thread(target=processor._start_monitors, args=(measure,))\n t.start()\n while t.is_alive():\n process_app_events()\n sleep(0.001)\n process_app_events()\n sleep(dialog_sleep)\n\n processor.engine = processor.plugin.create('engine', 'dummy')\n\n measure.add_tool('monitor', 'dummy')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.remove_tool('monitor', 'dummy2')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 1\n\n measure.add_tool('monitor', 'dummy3')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n measure.add_tool('monitor', 'dummy4')\n run(measure)\n assert len(processor.monitors_window.dock_area.dock_items()) == 2\n\n processor.plugin.stop()\n assert not processor.monitors_window", "def __init__(self, name, gauge, window = Amount(1, Time.SECONDS), clock = time):\r\n self._clock = clock\r\n self._gauge = gauge\r\n self._samples = []\r\n self._window = window\r\n NamedGauge.__init__(self, '%s_per_%s%s' % (name, window.amount(), window.unit()))", "def test_initialize_mutornadmon(self, mutornadomon_mock):\n result = initialize_mutornadomon(sentinel.application,\n host_limit='test')\n monitor_inst = mutornadomon_mock.MuTornadoMon.return_value\n\n # initialize_mutornadomon() should return the monitor instance\n self.assertEqual(result, monitor_inst)\n\n # MuTornadoMon was created with monitor config values\n mutornadomon_mock.MuTornadoMon.assert_called_once_with(\n host_limit='test')\n\n # Monitor instance was registered with tornado application\n monitor_inst.register_application.assert_called_once_with(\n sentinel.application)", "def start_monitor(self, collector):\n pass", "def __init__(self):\n self._monitor_lock = threading.Lock() # type: threading.Lock", "def parse_monitor(self):\n return DEFAULT_MONITOR", "def __init__(self, task=\"example\", test=False):\n # Set up the calling task that set up the monitor and if this is a test instance\n self.test = test\n self.task = task\n \n # Set the callbacks and monitors\n self.wx.callback(windmon)\n self.wx.monitor()\n\n self.ok2open.callback(okmon)\n self.ok2open.monitor()\n\n self.dmtimer.callback(dmtimemon)\n self.dmtimer.monitor()\n\n self.countrate.callback(countmon)\n self.countrate.monitor()\n \n self.fwhm.callback(fwhmmon)\n self.fwhm.monitor()\n \n self.teqmode.monitor()\n self.vmag.monitor()\n self.ldone.monitor()\n self.counts.monitor()\n self.decker.monitor()\n self.mv_perm.monitor()\n self.chk_close.monitor()\n\n self.sunel.monitor()\n self.aaz.monitor()\n self.ael.monitor()\n self.fspos.monitor()\n self.rspos.monitor()\n self.aafocus.monitor()\n\n # Grab some initial values for the state of the telescope\n \n self.wx.poll()\n self.fwhm.poll()\n self.countrate.poll()\n self.ok2open.poll()", "def __init__(self, monitor='val_loss', min_delta=0,\n patience=0, verbose=0,\n mode='auto', start_epoch=100):\n super().__init__(monitor=monitor, min_delta=min_delta,\n patience=patience, mode=mode,\n verbose=verbose)\n self.start_epoch = start_epoch", "def __init__(self, name = 'REMOTE STATION', 
typ = 'VIRTUAL'):\n super(RemoteMeasureUnit, self).__init__(name, typ)", "def __init__(self,units=None):\n self.__units = units", "def assign_surface_monitor(self, face_name, monitor_type=\"Temperature\", monitor_name=None):\n if not monitor_name:\n monitor_name = generate_unique_name(\"Monitor\")\n oModule = self.odesign.GetModule(\"Monitor\")\n oModule.AssignFaceMonitor([\"NAME:\" + monitor_name, \"Quantities:=\", [monitor_type], \"Objects:=\", [face_name]])\n return True", "def initialize(self, process_monitor):\n self.process_monitor = process_monitor\n self.radvd = ra.DaemonMonitor(self.router_id,\n self.ns_name,\n process_monitor,\n self.get_internal_device_name)\n\n if self.router_namespace:\n self.router_namespace.create()", "def set_monitor(w_card):\n\n # standard name for the monitor interfaces\n mon_id = \"mon{}\".format(w_card.phy)\n\n if mon_id not in pyw.winterfaces():\n # this monitor interface is not set\n # then create a new one\n m_card = pyw.devadd(w_card, mon_id, 'monitor')\n\n # remove obsolete interface\n pyw.devdel(w_card)\n\n return m_card\n\n return None", "def testMonitorInitGlobalAttributes(self):\n self.assertEquals(monitor.monitor_request, None)\n self.assertEquals(monitor.monitor_memory, None)", "def __init__(self, coresys: CoreSys):\n self.coresys: CoreSys = coresys\n self.context = pyudev.Context()\n self.monitor: pyudev.Monitor | None = None\n self.observer: pyudev.MonitorObserver | None = None", "def __init__(self, name=None, silent=False, unit='ms', logger_func=None):\n self.name = name\n self.silent = silent\n self.unit = unit\n self.logger_func = logger_func", "def init():\n \n # Check if metric already present in the metric_map\n if system_power_consumption not in metric_map:\n # Create metric and add it to metric_map\n metric_map[system_power_consumption] = Gauge(system_power_consumption, \"System Power Consumption\")\n \n if psu_health not in metric_map:\n metric_map[psu_health] = Gauge(psu_health, \"PSU Overall Health\")\n \n print(\"Initialized Power Exporter...\")", "def start_monitor():\n monitor_enabled = config_json[env]['MONITOR_ENABLED']\n monitor_trigger_interval_s = int( config_json[env]['MONITOR_TRIGGER_INTERVAL_S'] )\n\n # IF SCHEDULE IS ENABLED IN CONFIG:\n if monitor_enabled == \"1\":\n\n print(\"\\nSpace Weather Service Monitor: ENABLED (running every %s seconds)\" % monitor_trigger_interval_s)\n\n # RUN INITIAL CHECK SPACE WEATHER\n processes.process_check_space_weather()\n\n # CREATE SCHEDULER W/ INTERVAL TRIGGER AND START\n scheduler = BackgroundScheduler()\n scheduler.add_job(\n func = processes.process_check_space_weather,\n trigger = IntervalTrigger( seconds = monitor_trigger_interval_s ),\n id = 'check_space_weather',\n name = 'Checking Space Weather Every 30 Seconds')\n scheduler.start()\n atexit.register( lambda: scheduler.shutdown() )\n else:\n print(\"\\nSpace Weather Service Monitor: DISABLED\")", "def __init__(self, *args, **kwargs):\n super(NetworkStat, self).__init__(*args, **kwargs)\n Clock.schedule_interval(self.init_ui, 1)", "def test_create_healthmonitor_with_mandatory_params(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n admin_state_up = False\r\n delay = '60'\r\n max_retries = '2'\r\n timeout = '10'\r\n type = 'TCP'\r\n tenant_id = 'my-tenant'\r\n my_id = 'my-id'\r\n args = ['--admin-state-down',\r\n '--delay', delay,\r\n '--max-retries', max_retries,\r\n '--timeout', timeout,\r\n '--type', type,\r\n '--tenant-id', tenant_id]\r\n 
position_names = ['admin_state_up', 'delay', 'max_retries', 'timeout',\r\n 'type', 'tenant_id']\r\n position_values = [admin_state_up, delay, max_retries, timeout, type,\r\n tenant_id]\r\n self._test_create_resource(resource, cmd, '', my_id, args,\r\n position_names, position_values)", "def __init__(self, monitor_id: str, wait_time: int, run_interval: bool = False,\n custom_logger: Optional[Logger] = None, ping_interval: int = 60, debug: bool = False,\n debug_logger: Optional[Logger] = None):\n super().__init__(debug, debug_logger)\n # set meta flags\n self.active = False\n self.finished = False\n self.started = False\n\n # store parameters\n self.monitor_id = monitor_id\n self.wait_time = wait_time\n self.ping_interval = ping_interval\n self.run_interval = run_interval\n\n # init logger\n if custom_logger is not None:\n self.logger = custom_logger\n else:\n self.logger = Logger(self.__class__.__name__)\n # handle ping interval issues\n if wait_time < ping_interval:\n self.logger.info(f\"WARNING, monitor wait time {ping_interval} is longer than {wait_time} - overriding\")\n self.ping_interval = wait_time\n # runtime variables\n self.next_execution = 0", "async def _start_service_monitor(cls):\n cls.service_monitor = Monitor()\n await cls.service_monitor.start()", "def run(self):\n self.monitor.start()", "def __init__(self, width: int, height: int, interface: DisplayInterface):\n self.__width = width\n self.__height = height\n\n self.__interface = interface\n\n self.__sleeping = True\n\n self.init()", "def __init__(self, name='demo'):\n init()\n joystick.init()\n for i in range(joystick.get_count()):\n joystick.Joystick(i).init()\n\n State.game = util.load_cfg(name)\n State.clock = Clock(10, State.game['frame_rate'])\n State.window = display.set_mode(State.game['screen_size'])\n\n self._last_joystick_action = None\n self.create_screens()", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(mav_monitor, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.battery_voltage is None:\n self.battery_voltage = 0.\n if self.flight_mode_ll is None:\n self.flight_mode_ll = ''\n if self.state_estimation is None:\n self.state_estimation = ''\n if self.position_control is None:\n self.position_control = ''\n if self.serial_interface_enabled is None:\n self.serial_interface_enabled = False\n if self.serial_interface_active is None:\n self.serial_interface_active = False\n if self.flight_time is None:\n self.flight_time = 0.\n if self.cpu_load is None:\n self.cpu_load = 0.\n if self.motor_status is None:\n self.motor_status = ''\n if self.gps_status is None:\n self.gps_status = ''\n if self.gps_num_satellites is None:\n self.gps_num_satellites = 0\n if self.have_SSDK_parameters is None:\n self.have_SSDK_parameters = False\n if self.timesync_offset is None:\n self.timesync_offset = 0.\n if self.rc_channel is None:\n self.rc_channel = [0,0,0,0,0,0,0,0]\n if self.control_axes is None:\n self.control_axes = [0,0,0,0,0,0]\n if self.control_buttons is None:\n self.control_buttons = []\n if self.latitude is None:\n self.latitude = 0.\n if self.longitude is None:\n self.longitude = 0.\n if self.altitude is None:\n self.altitude = 0.\n if self.pressure_height is None:\n self.pressure_height = 0.\n if self.velocity_x is None:\n self.velocity_x = 0.\n if self.velocity_y is None:\n self.velocity_y = 0.\n else:\n self.header = std_msgs.msg.Header()\n self.battery_voltage = 0.\n 
self.flight_mode_ll = ''\n self.state_estimation = ''\n self.position_control = ''\n self.serial_interface_enabled = False\n self.serial_interface_active = False\n self.flight_time = 0.\n self.cpu_load = 0.\n self.motor_status = ''\n self.gps_status = ''\n self.gps_num_satellites = 0\n self.have_SSDK_parameters = False\n self.timesync_offset = 0.\n self.rc_channel = [0,0,0,0,0,0,0,0]\n self.control_axes = [0,0,0,0,0,0]\n self.control_buttons = []\n self.latitude = 0.\n self.longitude = 0.\n self.altitude = 0.\n self.pressure_height = 0.\n self.velocity_x = 0.\n self.velocity_y = 0.", "def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)", "def test_init(self):\n M = simulation.EventMonitor(self.G)\n self.assertTrue(hasattr(M, 't'))\n self.assertTrue(hasattr(M, 'i'))\n\n self.assertEqual(len(M.t), 0)\n self.assertEqual(len(M.i), 0)", "def startMonitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = None):\n Monitor(user, queueTwoMin, queueTenMin, queueHour, queueAlerts, queueTermination, testDic = testDic)", "def init_meas(self, **kwargs):\n self.failed = self.completed = self.inited = False\n self.message = ''\n self.start = self.time = 0\n if 'autotest' not in kwargs: # do not establish communication with the device for autotest tasks\n try:\n self.com = DTSerialCom() # serial communication instance (initialised only once as DTSerialCom is singleton)\n except DTComError as exc:\n self.set_com_error(exc)\n return self\n for par in kwargs:\n if par in self.parameters:\n self.parameters[par] = kwargs[par]\n if not self.check_all_parameters():\n self.set_error('Ошибка ввода параметров' if dtg.LANG == 'ru' else 'Parameter enter error')\n for res in self.results:\n self.results[res] = None\n return self", "def newMonitor(self, monitorName, monitorType):\n if monitorName in self._ShREEKMonitors.keys():\n msg = \"Tried to add Duplicate monitor:\\n\"\n msg += \"%s\\n\" % monitorName\n msg += \"To ShREEKInterface, existsing names:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n DuplicateName = monitorName,\n ExistingNames = self._ShREEKMonitors.keys())\n \n newMonitor = ShREEKMonitorCfg(MonitorName = monitorName,\n MonitorType = monitorType)\n \n self._ShREEKMonitors[monitorName] = newMonitor\n self._ShREEKConfig.addMonitorCfg(newMonitor)\n return", "def __init__(self, coresys: CoreSys):\n self.coresys: CoreSys = coresys\n self._devices: dict[str, Device] = {}\n self._udev = pyudev.Context()\n\n self._montior: HwMonitor = HwMonitor(coresys)\n self._helper: HwHelper = HwHelper(coresys)\n self._policy: HwPolicy = HwPolicy(coresys)\n self._disk: HwDisk = HwDisk(coresys)", "def __init__(self,\n measure_name_fmix: str,\n measure_name_emis: str,\n database: str):\n super().__init__()\n self._measurements[self.KEY_FMIX] = Measurement(name=measure_name_fmix,\n unit=self.UNIT_FMIX,\n database=database)\n self._measurements[self.KEY_EMIS] = Measurement(name=measure_name_emis,\n unit=self.UNIT_EMIS,\n database=database)", "def __init__(self,initial_meter_reading, initial_date):\r\n\t\tself.initial_meter_reading = initial_meter_reading\r\n\t\tself.initial_date = initial_date\r\n\t\tself.total_units_consumed = 0\r\n\t\tself.total_amount_spent = 0", "def setMonitorOption(self, monName, **options):\n monitorRef = 
self._ShREEKMonitors.get(monName, None)\n if monitorRef == None:\n msg = \"Tried to configure Non-existent monitor:\"\n msg += \"\\n%s\\n\" % monName\n msg += \"Existing Monitors:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n MissingMonitor = monName,\n ValidMonitors = self._ShREEKMonitors.keys())\n monitorRef.addKeywordArg(**options)\n return", "def _init_system(*args):\n __set_time_elements(args[0], args[1])\n __set_control_elements(args[0], args[2], args[3])\n __set_sensor_elements(args[0], args[4], args[5], args[6], args[7])", "def __init__(__self__, *,\n metrics: Optional[pulumi.Input['ManagedClusterAzureMonitorProfileMetricsArgs']] = None):\n if metrics is not None:\n pulumi.set(__self__, \"metrics\", metrics)", "def __init__(__self__, *,\n component_config: Optional[pulumi.Input['MonitoringComponentConfigArgs']] = None,\n managed_prometheus_config: Optional[pulumi.Input['ManagedPrometheusConfigArgs']] = None):\n if component_config is not None:\n pulumi.set(__self__, \"component_config\", component_config)\n if managed_prometheus_config is not None:\n pulumi.set(__self__, \"managed_prometheus_config\", managed_prometheus_config)", "def init_meter(self, loss_meters, elbo_meters):\n if loss_meters is None:\n self.train_loss_meter = RunningAverageMeter()\n self.val_loss_meter = RunningAverageMeter(0.5)\n else:\n self.train_loss_meter = loss_meters[0]\n self.val_loss_meter = loss_meters[1]\n\n if elbo_meters is None:\n self.train_elbo_meter = RunningAverageMeter()\n self.val_elbo_meter = RunningAverageMeter(0.5)\n else:\n self.train_elbo_meter = elbo_meters[0]\n self.val_elbo_meter = elbo_meters[1]", "def __init__(self, name: str, bus_number: int, device_id: int):\r\n self.name = name\r\n self.bus_number = bus_number\r\n self.bus_number_string = f\"bus.{bus_number}\"\r\n self.device_id = device_id\r\n self.average_temp = \"unknown\"", "def test_create_healthmonitor_with_all_params(self):\r\n resource = 'health_monitor'\r\n cmd = healthmonitor.CreateHealthMonitor(test_cli20.MyApp(sys.stdout),\r\n None)\r\n admin_state_up = False\r\n delay = '60'\r\n expected_codes = '200-202,204'\r\n http_method = 'HEAD'\r\n max_retries = '2'\r\n timeout = '10'\r\n type = 'TCP'\r\n tenant_id = 'my-tenant'\r\n url_path = '/health'\r\n my_id = 'my-id'\r\n args = ['--admin-state-down',\r\n '--delay', delay,\r\n '--expected-codes', expected_codes,\r\n '--http-method', http_method,\r\n '--max-retries', max_retries,\r\n '--timeout', timeout,\r\n '--type', type,\r\n '--tenant-id', tenant_id,\r\n '--url-path', url_path]\r\n position_names = ['admin_state_up', 'delay',\r\n 'expected_codes', 'http_method',\r\n 'max_retries', 'timeout',\r\n 'type', 'tenant_id', 'url_path']\r\n position_values = [admin_state_up, delay,\r\n expected_codes, http_method,\r\n max_retries, timeout,\r\n type, tenant_id, url_path]\r\n self._test_create_resource(resource, cmd, '', my_id, args,\r\n position_names, position_values)", "def __init__(self, machine):\n super().__init__(machine)\n self._displays = set()\n self._display_flash_task = None", "def __init__(self, measure):\n self.measure = measure # Dictionary of the measurement steps\n self.devices = {} # Dictionary holding all the devices\n self.output_devices = [] # List of devices with output capabilities\n self.daqs = {} # Dictionary that holds for each daq the inputs and outputs.\n self.rotation_stages = [] # If there are rotation stages present, they will show up in this list.\n # This short block is going to become useful 
in the future, when interfacing with a GUI\n for d in self.measure:\n setattr(self, d, self.measure[d])", "def monitor(self, rms):\n pass", "def __init__(self):\n self.wnd = WindSensor()", "def __init__(self, name, unique_id, hass, config):\n self._name = name\n self._unique_id = unique_id\n self._hass = hass\n self._cfg = config\n self._icon = 'mdi:power-socket'\n self._available = False\n self._state = None\n self._state_attrs = {\n # ATTR_TEMPERATURE: None,\n }\n self._skip_update = False", "def monitor(self):\n if self._monitor is None:\n self._monitor = Monitor(self)\n return self._monitor", "def create_local_monitor(cls, parent_logger: Logger, metrics: List[str], engine,\n mtab_path=None) -> BaseLocalMonitor:\n log = parent_logger.getChild(cls.__name__)\n if not mtab_path:\n if is_windows():\n return StandardLocalMonitor(parent_logger, metrics, engine)\n mtab_path = '/etc/mtab'\n cgroups_version, cgroups_fs_path = cls._detect_cgroup_info(log, mtab_path)\n if cgroups_version == 2:\n return Cgroups2LocalMonitor(cgroups_fs_path, parent_logger, metrics, engine)\n elif cgroups_version == 1:\n return Cgroups1LocalMonitor(cgroups_fs_path, parent_logger, metrics, engine)\n else:\n return StandardLocalMonitor(parent_logger, metrics, engine)", "def from_dict(cls, _dict: Dict) -> 'Monitor':\n args = {}\n if 'id' in _dict:\n args['id'] = _dict.get('id')\n if 'name' in _dict:\n args['name'] = _dict.get('name')\n if 'description' in _dict:\n args['description'] = _dict.get('description')\n if 'type' in _dict:\n args['type'] = _dict.get('type')\n if 'port' in _dict:\n args['port'] = _dict.get('port')\n if 'interval' in _dict:\n args['interval'] = _dict.get('interval')\n if 'retries' in _dict:\n args['retries'] = _dict.get('retries')\n if 'timeout' in _dict:\n args['timeout'] = _dict.get('timeout')\n if 'method' in _dict:\n args['method'] = _dict.get('method')\n if 'path' in _dict:\n args['path'] = _dict.get('path')\n if 'headers' in _dict:\n args['headers_'] = [HealthcheckHeader.from_dict(x) for x in _dict.get('headers')]\n if 'allow_insecure' in _dict:\n args['allow_insecure'] = _dict.get('allow_insecure')\n if 'expected_codes' in _dict:\n args['expected_codes'] = _dict.get('expected_codes')\n if 'expected_body' in _dict:\n args['expected_body'] = _dict.get('expected_body')\n if 'created_on' in _dict:\n args['created_on'] = _dict.get('created_on')\n if 'modified_on' in _dict:\n args['modified_on'] = _dict.get('modified_on')\n return cls(**args)", "def initialize(self):\n\n # --------- BEGIN YOUR CODE ----------\n\n # This is exactly the same as Human.initialize, just copy the code over\n\n # --------- END YOUR CODE ----------\n pass", "def __init__(self, start, step, size, unit='SECOND'):\n self.unit = unit\n self.start = start\n self.step = step\n self.size = size", "def __init__(self, name, unit=\"F\") -> None:\n\n super().__init__(name)\n self.unit: str = unit\n \"\"\"The temperature unit.\"\"\"", "def _start_monitor(instance=\"default\"):\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n logger_ic.info(\"ipmi-console monitor thread starts to run.\")\n monitor_thread = threading.Thread(target=monitor, args=(instance,))\n monitor_thread.setDaemon(True)\n monitor_thread.start()", "def initUI(self):\n self.logger.debug('Setting up the Measurement GUI')\n self.setWindowTitle(self.title)\n\n self.show()\n\n self.make_combobox_scanner()\n self.make_combobox_movements()\n self.make_combobox_configurate()\n self.make_combobox_basic()", "def 
__init__(self, config):\n self.config = config\n self.status = {\n 'serial':'None',\n 'timestamp':'None',\n 'uptime':'None',\n 'free_disk_space_sdcard':'None',\n 'free_disk_space_stick':'None',\n 'wwan_reception':'None',\n 'log':'None',\n }\n self.collect()", "def __init__ (self, size, name):\n\n self.size = size\n self.name = name\n self.units = [1 for x in range(size)]", "def __init__(self, name):\r\n super(SystemDescription, self).__init__()\r\n self.name = name", "def init(self):\n imageDim = u.getDimImage(self.length, 0, 0, 78) # 54.5, 42.3, 66.17\n self.imageInfo['ratio'] = u.getRatio(self.imageInfo['shape'],\n imageDim)\n\n self.measuring = pymeasuring.Measuring(self.imageInfo, self.length)\n\n # rospy.loginfo(\"dims of image [mm]: \" + str(imageDim))\n # rospy.loginfo(\"ratios [mm/px]: \" + str(self.imageInfo['ratio']))\n # rospy.loginfo(\"shape [px]: \" + str(self.imageInfo['shape']))\n rospy.loginfo('init of measuring object is complete.')", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "def __init__(self, name, level):\n\t\tself.name = name\n\t\tself.level = level\n\t\tself.human = False\n\t\tself.timer = None\n\t\tself.ai = game_engine(level)", "def monitor(self, *args, **kwargs):\n kwargs['logger'] = self\n return Monitor(*args, **kwargs)", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def initialise_screen(self):\n self.objects.append(\n pgzero.actor.Actor('screen_background', topleft=(-1, 0))\n )\n self.vessel = gauge.Gauge(\n name='vessel',\n foreground='vessel_front',\n back_colour=S['vessel-back-colour'],\n front_colour=S['vessel-water-colour'],\n size=S['vessel-size'],\n value=20,\n orientation=gauge.Gauge.VERTICAL,\n bar_offset=S['vessel-bar-offset'],\n )\n self.vessel.pos = S['vessel-position']\n self.objects.append(self.vessel)\n #\n # The panels showing the individual people\n self.health_panels = {}\n for idx, name in enumerate('abcde'):\n panel = healthpanel.HealthPanel(name, self)\n panel.pos = (S['panel-initial-x'] + idx * S['panel-dx'], S['panel-initial-y'])\n self.objects.append(panel)\n self.health_panels[name] = panel\n #\n self.tabbed = tabbed.Tabbed()\n self.objects.append(self.tabbed)\n #\n self.clock = game.clock.Clock('clock', self)\n self.clock.pos = S['clock-pos']\n self.objects.append(self.clock)\n self.end_of_day = None\n #\n self.awaiting_conversations = set()\n self.deaths = {}", "def __init__(self, mlhost, mlport, screen_width, screen_height):\n pymlgame.init()\n self.screen = pymlgame.Screen(mlhost, mlport, screen_width, screen_height)\n self.clock = pymlgame.Clock(15)\n\n self.reset()", "def initialize(self):\n watch_tv = self.args['watch_tv']\n cleaning = self.args['cleaning']\n self.sensor_living = self.get_app('globals').sensor_living # type: Sensor\n self.sensor_bedroom = self.get_app('globals').sensor_bedroom # type: Sensor\n self.sensor_spare = self.get_app('globals').sensor_spare # type: 
Sensor\n self.listen_state(self.watching_tv, watch_tv, new=\"on\")\n self.listen_state(self.stop_watching, watch_tv, new=\"off\")\n self.listen_state(self.clean_on, cleaning, new='on')\n self.listen_state(self.clean_off, cleaning, new='off')", "def __init__(self, title=\"\", units=\"\", tunits=\"ns\", ax=None, talk=False):\n super().__init__(ax=ax, talk=talk)\n self.title = title\n self.units = units\n self.tunits = tunits", "def test_MCE_sysfs_initialized(self):\n num_of_mc_folders = self.get_num_of_mc_folders()\n code, num_cpus, err = systeminfo.Run([\"nproc\"])\n if int(num_of_mc_folders) == int(num_cpus):\n self.log.info(\"MCE sysfs device initialization successful\")\n else:\n self.fail(\"MCE sysfs device initialization failed\")", "def event_monitor(self, event_monitor_path=\"\", action=\"start\"):\n device_name = self.find_iio_device_name()\n if not event_monitor_path:\n self.is_bin_exist(\"iio_event_monitor\", silent_discard=False)\n event_monitor_path = \"iio_event_monitor\"\n if action == \"start\":\n self.console.runcmd(\n f\"{event_monitor_path} {device_name} &\",\n err_msg=\"Event Monitor Initialisation Failed\",\n timeout=50,\n )\n elif action == \"stop\":\n self.console.runcmd(\n f\"pidof {event_monitor_path} {device_name}\", expected=\"\\r\\n\"\n )\n pid_no = self.console.output()\n if pid_no:\n self.console.runcmd(f\"kill -9 {pid_no}\")\n else:\n assert False, \"Not a valid action for event_monitor\"", "def __init__(self):\n self.base_dir = '/sys/bus/w1/devices/'\n self.device_folder = glob.glob(self.base_dir + '28*')[0]\n self.device_file = self.device_folder + '/w1_slave'", "def set_current_units(units=None):\n manager = Manager() \n if units is not None:\n # set units using a supplied dictionary\n for utype in units:\n if utype in manager.allowed_utypes:\n un = units[utype]\n # handle the identity of \"frequency\" and \"energy\"\n if utype==\"frequency\":\n utype=\"energy\"\n un = units[\"frequency\"]\n \n manager.set_current_units(utype,un)\n else:\n raise Exception(\"Unknown units type %s\" % utype)\n\n else:\n # reset units to the default\n for utype in manager.internal_units:\n if utype in manager.allowed_utypes:\n manager.set_current_units(utype,manager.internal_units[utype])\n else:\n raise Exception(\"Unknown units type %s\" % utype)", "def __init__(self, name=\"\", description=\"\", time_units=\"s\", len_units=\"m\",\n pump_units=\"m3/s\"):\n\n # Set general info\n self._type = 1 # pumping well id\n self.parameters = {'full': True,\n 'rw': 1.,\n 'd': 0.,\n 'l': 1.}\n self.time_units = time_units\n self.len_units = len_units\n self.pump_units = pump_units\n\n # Create pumping well data\n self.pumprate = _Data(dtype=0, name=name, description=description)\n self.pumprate.set_units(self.time_units, self.pump_units)\n\n # Set observation wells and piezometers\n self.wells = []", "def init():\n global screen_manager\n screen_manager = ScreenManager(transition=SlideTransition())", "def monitor(self, **kwargs):\n self.show_info(monitor=True, **kwargs)", "def __init__ (self, name=\"TrigEFMissingETOnlineMonitoring_alt\"):\n super(TrigEFMissingETOnlineMonitoring_alt, self).__init__(name)\n self.defineTarget(\"Online\")\n # measurement\n self.Histograms = [ hEx_log, hEy_log, hEz_log, hMET_log, hSumEt_log ]\n self.Histograms += [ hMET_lin, hMETStatus ]\n self.Histograms += [ hMETPhi ]", "def monitor(self) -> HwMonitor:\n return self._montior", "def __init__ (self, name=\"TrigEFMissingETOnlineMonitoring\"):\n super(TrigEFMissingETOnlineMonitoring, self).__init__(name)\n 
self.defineTarget(\"Online\")\n # measurement\n self.Histograms = [ hEx_log, hEy_log, hEz_log, hMET_log, hSumEt_log ]\n self.Histograms += [ hMET_lin, hSumEt_lin ]\n self.Histograms += [ hXS, hMETPhi, hMETStatus]\n self.Histograms += [ hCompEx, hCompEy, hCompEz, hCompEt, hCompSumEt, hCompSumE ]\n self.Histograms += [ hCompEt_lin, hCompSumEt_lin ]\n # timers\n# self.Histograms += [ hTotalTime, hTotalTimeRegSel, hTotalTimeLoadCol, hTotalTimeLoop ]\n self.Histograms += [ hTotalTimeRegSel, hTotalTimeLoadCol, hTotalTimeLoop ]\n self.Histograms += [ hTotalTimeEM, hTotalTimeHEC, hTotalTimeTile ]", "def __init__(self, resolution):\n # Initialize the base class, so that the object can run on its own\n # thread.\n super(LocalDisplay, self).__init__()\n # List of valid resolutions\n RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}\n if resolution not in RESOLUTION:\n raise Exception(\"Invalid resolution\")\n self.resolution = RESOLUTION[resolution]\n # Initialize the default image to be a white canvas. Clients\n # will update the image when ready.\n self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]\n self.stop_request = Event()", "def __init__(self, resolution):\n # Initialize the base class, so that the object can run on its own\n # thread.\n super(LocalDisplay, self).__init__()\n # List of valid resolutions\n RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}\n if resolution not in RESOLUTION:\n raise Exception(\"Invalid resolution\")\n self.resolution = RESOLUTION[resolution]\n # Initialize the default image to be a white canvas. Clients\n # will update the image when ready.\n self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]\n self.stop_request = Event()", "def __init__(self, resolution):\n # Initialize the base class, so that the object can run on its own\n # thread.\n super(LocalDisplay, self).__init__()\n # List of valid resolutions\n RESOLUTION = {'1080p' : (1920, 1080), '720p' : (1280, 720), '480p' : (858, 480)}\n if resolution not in RESOLUTION:\n raise Exception(\"Invalid resolution\")\n self.resolution = RESOLUTION[resolution]\n # Initialize the default image to be a white canvas. Clients\n # will update the image when ready.\n self.frame = cv2.imencode('.jpg', 255*np.ones([640, 480, 3]))[1]\n self.stop_request = Event()", "def __init__(self,usb_ser_port_num,sm_gpib_addr,dmm_gpib_addr):\n self.initialize_elexol(usb_ser_port_num)\n\n #find the number of shift registers installed in test system\n print \"Counting number of shift registers connected to fixture...\"\n num_shift_reg_found = self.count_shift_reg()\n\n if num_shift_reg_found < 1:\n print \"No shift registers present... 
Exiting program.\n exit()\n else:\n print \"found %d shift registers\"%self.num_registers\n\n #initialize all of the relays to the normally closed state\n print \"\\nInitializing %d shift registers\"%self.num_registers\n self.initialize_relays()\n print \"initialization done.\"\n\n #create instances of the sourcemeter and dmm for communication\n self.dmm = Instruments.DMM_34401A(\"GPIB::22\",\"meter\")\n self.sm = Instruments.sourcemeter_2400(\"GPIB::4\",\"sourcemeter\")", "def startRepeat(self, monitor):\n request = {\"msgid\": uuid.uuid4().hex, \"status\": [\"command\", 0], \"command\": {\"monitor\": monitor, \"setrunning\": \"start\"}}\n\n self.connection.outbuf += json.dumps(request) + \"\\n\"\n self.monitorState[monitor][\"state\"] = \"ok\"", "def set_units(self, units):\n self.units = units", "def __init__(self, config, sensor=None):\n if sensor is None:\n from monitor.sensor import SensorDriver\n self.sensor = SensorDriver(config.getint(CONFIG_SECTION, \"trigger_pin\"),\n config.getint(CONFIG_SECTION, \"echo_pin\"))\n else:\n self.sensor = sensor\n self.num_samples = config.getint(CONFIG_SECTION, \"num_samples\")\n self.drop_extremes = config.getboolean(CONFIG_SECTION, \"drop_extremes\")\n self.sample_delay = config.getfloat(CONFIG_SECTION, \"sample_delay\")\n self.is_running = False\n self.dist_to_bottom = config.getfloat(CONFIG_SECTION, \"distance_to_bottom\")", "def __init__(self, name, sensor_name, updater):\n self._name = name\n self._sensor_name = sensor_name\n self._sensor_type = habitica.SENSORS_TYPES[sensor_name]\n self._state = None\n self._updater = updater", "def test_cycle_monitor(hlwm, mon_num, focus_idx, delta, command):\n for i in range(1, mon_num):\n hlwm.call('add tag' + str(i))\n hlwm.call('add_monitor 800x600+' + str(i * 10))\n hlwm.call(['focus_monitor', str(focus_idx)])\n assert hlwm.get_attr('monitors.focus.index') == str(focus_idx)\n assert hlwm.get_attr('monitors.count') == str(mon_num)\n\n hlwm.call([command, delta])\n\n new_index = (focus_idx + int(delta) + mon_num) % mon_num\n assert hlwm.get_attr('monitors.focus.index') == str(new_index)", "def __init__(self, name: str):\n super().__init__()\n\n # SYNC mode not supported yet\n if cm.num_receivers() > 1:\n raise RuntimeError(\n \"Summaries with multiple receiver ordinals are currently \"\n f\"not supported. 
`num_receivers` was {cm.num_receivers()}\"\n )\n\n self._name = name\n\n # Variable for storing the received summaries\n self._cached_cpu_activations = []\n\n self._is_appliance = cm.is_appliance()", "def __init__(self):\n super().__init__('node_name')\n self.create_timer(0.2, self.timer_callback)\n\n self.count = 1", "def set_monitor(self, track, xclip, ident, args):\n if track in self.song().tracks and not track.is_foldable:\n if args in MON_STATES:\n track.current_monitoring_state = MON_STATES[args]\n else:\n if track.current_monitoring_state == 2:\n track.current_monitoring_state = 0\n else:\n track.current_monitoring_state += 1", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def __init__(self, *args: Species, units=KcalMol):\n super().__init__()\n\n for arg in args:\n assert isinstance(arg, Species)\n self.append(arg)\n\n self.units = units", "def __init__(self, sensor, temperature_resolution, humidity_resolution):\n self.sensor = sensor\n self.sensor.turnHeaterOn() \n time.sleep(1.0) # Burn off condensed stuff.\n self.sensor.turnHeaterOff() \n self.update()\n # Main Program\n #print \"------------\"\n #print \"Manfacturer ID=0x%X\"% self.sensor.readManufacturerID() \n #print \"Device ID=0x%X\"% self.sensor.readDeviceID() \n #print \"Serial Number ID=0x%X\"% self.sensor.readSerialNumber() \n \n # change temperature resolution\n self.sensor.setTemperatureResolution(temperature_resolution)\n self.sensor.setHumidityResolution(humidity_resolution)", "def init():\n return _libsbml.SBMLUnitsConverter_init()" ]
[ "0.69244856", "0.5961898", "0.5762785", "0.5757067", "0.5748094", "0.5743669", "0.5689868", "0.5632864", "0.56286836", "0.5607507", "0.55625767", "0.5536116", "0.5534481", "0.5490145", "0.5483726", "0.5446595", "0.54104185", "0.54083985", "0.5397087", "0.539698", "0.53613365", "0.5336883", "0.5325601", "0.53191644", "0.53179896", "0.53123504", "0.53100437", "0.5307851", "0.529687", "0.5296668", "0.5287479", "0.52777916", "0.5261846", "0.5257182", "0.523942", "0.5231063", "0.5190453", "0.5155871", "0.5138382", "0.5123118", "0.51151896", "0.5054312", "0.50433046", "0.50431615", "0.50305545", "0.50240475", "0.50192124", "0.5001912", "0.49941579", "0.49941248", "0.498681", "0.49851626", "0.49826714", "0.49784777", "0.49734426", "0.4970501", "0.49689844", "0.49674293", "0.49605575", "0.49529755", "0.4946666", "0.49404454", "0.49340776", "0.4926119", "0.4918807", "0.49035442", "0.48928347", "0.48922083", "0.48920506", "0.48904657", "0.4890072", "0.48837683", "0.48831862", "0.48782757", "0.48781478", "0.48756036", "0.4864564", "0.4854529", "0.48440066", "0.4839723", "0.4838545", "0.48370856", "0.48364148", "0.48261663", "0.48233253", "0.48233253", "0.48233253", "0.4819929", "0.48140436", "0.48038754", "0.4802677", "0.4792118", "0.47908923", "0.47901264", "0.4788261", "0.47868878", "0.47849384", "0.47788814", "0.4775253", "0.47705635" ]
0.53771675
20
Collects image data via appropriate protocol and builds image data dictionary. Returns a dict mapping image components to values.
def poll(self) -> Dict[str, list]:
    try:
        value = self.controller.get_image(self.pvname)
    except TimeoutError:
        print(f"No process variable found for {self.pvname}")
        return DEFAULT_IMAGE_DATA
    # now prepare the value using method defined by the model
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_imagenet_as_dict(self):\n real_file_path = os.path.realpath(self.map_file)\n if not os.path.exists(real_file_path):\n raise IOError(\"map file {} not exists\".format(self.map_file))\n\n label_dict = {}\n with open(real_file_path) as fp:\n line = fp.readline()\n while line:\n labels = line.split(\" \")\n label_dict[labels[1]] = labels[0]\n line = fp.readline()\n\n # get all the dir which are n02087046, n02094114, n02109525\n dir_paths = {}\n for item in label_dict:\n real_path = os.path.join(self.image_dir, label_dict[item])\n if not os.path.isdir(real_path):\n logger.warning(\"{} dir is not exist\".format(real_path))\n continue\n dir_paths[item] = real_path\n\n if not dir_paths:\n raise PathNotExistsError(\"not valid image dir in {}\".format(self.image_dir))\n\n # get the filename, label and image binary as a dict\n for label in dir_paths:\n for item in os.listdir(dir_paths[label]):\n file_name = os.path.join(dir_paths[label], item)\n if not item.endswith(\"JPEG\") and not item.endswith(\"jpg\"):\n logger.warning(\"{} file is not suffix with JPEG/jpg, skip it.\".format(file_name))\n continue\n data = {}\n data[\"file_name\"] = str(file_name)\n data[\"label\"] = int(label)\n\n # get the image data\n real_file_path = os.path.realpath(file_name)\n image_file = open(real_file_path, \"rb\")\n image_bytes = image_file.read()\n image_file.close()\n if not image_bytes:\n logger.warning(\"The image file: {} is invalid.\".format(file_name))\n continue\n data[\"image\"] = image_bytes\n yield data", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. 
Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def images(self) -> dict:\n raise NotImplementedError", "def imgProp(img):\n\td = {}\n\td[\"shape\"] = img.shape\n\td[\"rows\"] = img.shape[0]\n\td[\"columns\"] = img.shape[1]\n\tif len(img.shape) is 3:\n\t\td[\"channels\"] = img.shape[2]\n\td[\"size\"] = img.size\n\td[\"dtype\"] = img.dtype\n\treturn d", "def make_image_dict(image):\n\n def _fetch_attrs(d, attrs):\n return dict([(a, d[a]) for a in attrs\n if a in d.keys()])\n\n # TODO(sirp): should this be a dict, or a list of dicts?\n # A plain dict is more convenient, but list of dicts would provide\n # access to created_at, etc\n properties = dict((p['name'], p['value'])\n for p in image['properties'] if not p['deleted'])\n\n image_dict = _fetch_attrs(image, db_api.IMAGE_ATTRS)\n\n image_dict['properties'] = properties\n return image_dict", "def parse_image_meta_graph(self, meta):\n\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n pass", "def get_image_data(imagedir, model_kwds=dict(layer='fc2'),\n img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),\n pca_kwds=None):\n fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')\n images_fn = pj(imagedir, ic_base_dir, 'images.pk')\n if os.path.exists(images_fn):\n print(f\"reading image arrays {images_fn} ...\")\n images = read_pk(images_fn)\n else:\n print(f\"create image arrays {images_fn}\")\n images = read_images(imagedir, **img_kwds)\n write_pk(images, images_fn)\n if os.path.exists(fingerprints_fn):\n print(f\"reading fingerprints {fingerprints_fn} ...\")\n fingerprints = read_pk(fingerprints_fn)\n else:\n print(f\"create fingerprints {fingerprints_fn}\")\n fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))\n if pca_kwds is not None:\n fingerprints = ic.pca(fingerprints, **pca_kwds)\n write_pk(fingerprints, fingerprints_fn)\n print(f\"reading timestamps ...\")\n if timestamps_kwds is not None:\n timestamps = read_timestamps(imagedir, **timestamps_kwds)\n return images, fingerprints, timestamps", "def get_data(self):\n return {\"imgID\": self.image_id}", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def compose_image_meta(image_id, image_shape, window, active_class_ids):\n meta = np.array(\n [image_id] + # size=1\n list(image_shape) + # size=3\n list(window) + # size=4 (x1, y1, x2, y2) in image cooredinates\n list(active_class_ids) # 
size=num_classes\n )\n return meta", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta", "def get_image(self, pvname):\n if self.protocol == \"ca\":\n pvname = pvname.replace(\":ArrayData_RBV\", \"\")\n nx = self.get(f\"{pvname}:ArraySizeX_RBV\")\n ny = self.get(f\"{pvname}:ArraySizeY_RBV\")\n dw = self.get(f\"{pvname}:dw\")\n dh = self.get(f\"{pvname}:dh\")\n image = self.get(f\"{pvname}:ArrayData_RBV\")\n image = image.reshape(int(nx), int(ny))\n\n elif self.protocol == \"pva\":\n # context returns np array with WRITEABLE=False\n # copy to manipulate array below\n output = self.get(pvname)\n attrib = output.attrib\n dw = attrib[\"dw\"]\n dh = attrib[\"dh\"]\n nx, ny = output.shape\n image = copy.copy(output)\n\n return {\n \"image\": [image],\n \"x\": [-dw / 2],\n \"y\": [-dh / 2],\n \"dw\": [dw],\n \"dh\": [dh],\n }", "def store_img_infos(self, msg):\n # msg is technically a ConsumerRecord that is a collections.namedtuple, see:\n # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py#L30\n strk = str(msg['sha1'])\n self.dict_sha1_infos[strk] = dict()\n for key in msg:\n # dumps json of 'img_info'\n # We actually need that only for DIG...\n if key == \"img_info\":\n self.dict_sha1_infos[strk][key] = json.dumps(msg[key])\n else:\n # discard 'img_buffer' (if it exists?...), and 'sha1'\n # if k != \"img_buffer\" and k != \"sha1\":\n # self.dict_sha1_infos[strk][k] = msg[k]\n # discard 'sha1'\n if key != \"sha1\":\n self.dict_sha1_infos[strk][key] = msg[key]", "def parse_image_meta_graph(meta):\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def compose_image_meta(self, image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n\n meta = np.array([image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image cooredinates\n [scale] + # size=1\n list(active_class_ids) # size=class_num\n )\n return meta\n pass", "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def create_data_dict(data_dir, 
img_size=[25, 83]):\n print(\"Creating data dictionary\")\n print(\"- Using data at:\", data_dir)\n\n # Directories\n imgs_dir = os.path.join(data_dir, \"training/image_2\")\n labels_dir = os.path.join(data_dir, \"training/gt_image_2\")\n\n print(\"- Getting list of files\")\n # Only get the label files for road (not lane)\n label_files = glob.glob(os.path.join(labels_dir, \"*_road_*.png\"))\n\n # Create corresponding list of training image files\n img_files = list(map(lambda f: os.path.basename(f).replace(\"_road\", \"\"), label_files))\n img_files = list(map(lambda f: os.path.join(imgs_dir, f), img_files)) # absolute path\n\n n_samples = len(img_files)\n print(\"- Encountered {} samples\".format(n_samples))\n est_filesize = (n_samples*np.prod(img_size)*(3+1))/1e6\n print(\"- Estimated output filesize: {:0.3f} MB + overhead\".format(est_filesize))\n\n data = {}\n data[\"X_train\"] = np.empty([n_samples]+img_size+[3], dtype=np.uint8)\n data[\"Y_train\"] = np.empty([n_samples]+img_size, dtype=np.uint8)\n\n print(\"- Processing image files\")\n for i in range(n_samples):\n label_img = scipy.misc.imread(label_files[i])\n input_img = scipy.misc.imread(img_files[i])\n\n # PRERPOCESS THE IMAGES\n label_img = scipy.misc.imresize(label_img, img_size)\n input_img = scipy.misc.imresize(input_img, img_size)\n\n # PROCESSING LABEL IMAGE\n # Only one channel, (1=road, 0=not road)\n non_road_class = np.array([255,0,0])\n label_img = (1-np.all(label_img==non_road_class, axis=2, keepdims=False)).astype(np.uint8)\n\n # Place the images into the data arrays\n data[\"X_train\"][i] = input_img\n data[\"Y_train\"][i] = label_img\n\n print(\"- Shuffling the data\")\n np.random.seed(seed=128)\n ids = list(np.random.permutation(n_samples))\n data[\"X_train\"] = data[\"X_train\"][ids]\n data[\"Y_train\"] = data[\"Y_train\"][ids]\n\n print(\"- Done!\")\n return data", "def serialize_image(self, image):\r\n result = {\r\n 'pixels': image.tobytes(),\r\n 'size': image.size,\r\n 'mode': image.mode\r\n }\r\n return result", "def read_image_data(self):\n\n for sequence_name in self.sequence_name_list:\n sequence = self.sequences[sequence_name]\n for image_id in sequence.image_id_list:\n sequence.image_dict[image_id].image_path = '{}{}/{}'.format(self.root_dir, self.name, sequence.image_dict[image_id].filename)", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def _image_hdr(self, hdr):\n # Called ... 
in OpenMIMS\n d = {}\n d['header size'], d['type'], d['width'], d['height'], \\\n d['bytes per pixel'], d['masses'], d['planes'], \\\n d['raster'], d['original filename'] = \\\n unpack(self._bo + 'i 6h i 64s', hdr.read(84))\n\n # Called nickname in OpenMIMS\n d['original filename'] = self._cleanup_string(d['original filename'])\n if d['header size'] != 84:\n raise ValueError(\"Image header size is {}, not 84.\".format(d['header size']))\n return d", "def get_images():\n images = {}\n for k, v in DB.IMAGES.iteritems():\n images[k] = v.__dict__\n return images", "def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass", "def build_features_dict(image, image_id, filename, image_format=None,\n bboxes=None, masks=None, label_ids=None,\n label_names=None, masks_format=\"png\"):\n\n # Add channel dimension if needed.\n if len(image.shape) == 3:\n pass\n elif len(image.shape) == 2:\n image = np.expand_dims(image, -1)\n else:\n raise Exception(f\"Wrong image shape: {image.shape}\")\n\n # Get image shape.\n image_width, image_height, image_channel = image.shape\n\n # Encode image.\n image_encoded = imaging.encode_image(image, image_format)\n\n # Create te feature dict.\n feature_dict = {}\n\n # Image features\n feature_dict['image_height'] = int64_feature(image_height)\n feature_dict['image_width'] = int64_feature(image_width)\n feature_dict['image_channel'] = int64_feature(image_channel)\n feature_dict['image_filename'] = bytes_feature(filename.encode('utf8'))\n feature_dict['image_id'] = bytes_feature(str(image_id).encode('utf8'))\n feature_dict['image_encoded'] = bytes_feature(image_encoded.numpy())\n feature_dict['image_format'] = bytes_feature(image_format.encode('utf8'))\n\n # Object features\n if bboxes is not None:\n if bboxes.shape[0] > 0:\n bboxes_x = bboxes[:, 0]\n bboxes_y = bboxes[:, 1]\n bboxes_width = bboxes[:, 2]\n bboxes_height = bboxes[:, 3]\n else:\n bboxes_x = []\n bboxes_y = []\n bboxes_width = []\n bboxes_height = []\n\n feature_dict['bboxes_x'] = float_list_feature(bboxes_x)\n feature_dict['bboxes_y'] = float_list_feature(bboxes_y)\n feature_dict['bboxes_width'] = float_list_feature(bboxes_width)\n feature_dict['bboxes_height'] = float_list_feature(bboxes_height)\n\n if label_ids is not None:\n feature_dict['label_ids'] = int64_list_feature(label_ids)\n\n if label_names is not None:\n feature_dict['label_names'] = bytes_list_feature(label_names)\n\n if masks is not None:\n # Encode masks.\n masks_encoded = []\n for mask in masks:\n mask = image = np.expand_dims(mask, -1)\n mask_encoded = imaging.encode_image(mask, masks_format)\n masks_encoded.append(mask_encoded.numpy())\n\n feature_dict['masks_encoded'] = bytes_list_feature(masks_encoded)\n feature_dict['masks_format'] = bytes_feature(masks_format.encode(\"utf8\"))\n\n return feature_dict", "def images_mapped(self):\n try:\n return dict([x for x in enumerate(self.images())])\n except:\n return None", "def clean(imagedata):\n if asarray(imagedata).ndim not in set((2, 3)):\n raise Exception(\"Input must be two or three dimensional\")\n\n outdict = [array_to_im(imagedata)]\n\n return {'images': outdict}", "def clean(imagedata):\n if asarray(imagedata).ndim not in set((2, 3)):\n raise Exception(\"Input must be two or three dimensional\")\n\n outdict = [array_to_im(imagedata)]\n\n return {'images': outdict}", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n 
img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def clean(imagedata):\n\n if isinstance(imagedata, ndarray):\n imagedata = [imagedata]\n\n outdict = [array_to_im(im) for im in imagedata]\n\n return {'images': outdict}", "def get_entry_dict(self):\n\n # generating thumbnail URLs is slow, so only generate the ones\n # that will definitely be used.\n ret = {\n 'id': self.id,\n 'vertices': self.vertices,\n 'triangles': self.triangles,\n 'segments': self.segments,\n 'photo': self.photo.get_entry_dict(),\n }\n if self.dominant_rgb0:\n ret['dominant_rgb0'] = self.dominant_rgb0\n #if self.image_pbox:\n #ret['pbox'] = self.pbox\n #ret['image_pbox'] = {\n #'300': self.image_pbox_300.url,\n #'512': self.image_pbox_512.url,\n #'1024': self.image_pbox_1024.url,\n #'orig': self.image_pbox.url,\n #}\n if self.image_bbox:\n ret['image_bbox'] = {\n #'512': self.image_bbox_512.url,\n '1024': self.image_bbox_1024.url,\n #'orig': self.image_bbox.url,\n }\n return ret", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def _get_data(self, image_name=False, image=False, training_format=True):\n if not image_name and not image:\n # one of the two should be specified\n assert False\n if image_name:\n blob = self._load_data(image_name)\n if image:\n blob = {}\n for m in image:\n blob[m] = image[m].copy()\n\n if training_format:\n blob = augmentate(blob,\n scale=self.config['augmentation']['scale'],\n crop=self.config['augmentation']['crop'],\n hflip=self.config['augmentation']['hflip'],\n vflip=self.config['augmentation']['vflip'],\n gamma=self.config['augmentation']['gamma'],\n contrast=self.config['augmentation']['contrast'],\n brightness=self.config['augmentation']['brightness'],\n rotate=self.config['augmentation']['rotate'],\n shear=self.config['augmentation']['shear'])\n\n # Format labels into one-hot\n blob['labels'] = np.array(self.one_hot_lookup ==\n blob['labels'][:, :, None]).astype(int)\n\n # We have to add a dimension for the channels, as there is only one and the\n # dimension is omitted.\n blob['depth'] = np.expand_dims(blob['depth'], 3)\n\n # Force the image dimension to be multiple of 16\n h, w, _ = blob['rgb'].shape\n h_c, w_c = [d - (d % 16) for d in [h, w]]\n if h_c != h or w_c != w:\n for m in ['rgb', 'depth', 'labels']:\n blob[m] = blob[m][:h_c, :w_c, ...]\n\n return blob", "def getImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n in_dict = {}\n in_dict[\"name\"] = img.name\n in_dict[\"b64str\"] = img.b64str\n in_dict[\"imgsize\"] = img.imgsize\n in_dict[\"processed\"] = 
img.processed\n in_dict[\"timestamp\"] = img.timestamp\n return in_dict", "def to_ingest_dict(self):\n return {\n 'extent': self.extent,\n 'crsExtent': self.extent_crs,\n 'bandMaps': self.band_maps,\n 'crs': self.source_crs,\n 'uri': self.image['sourceUri'],\n }", "def get_images(self):\n return {'source': utils.image_from_tensor(self.source[0]),\n 'output': utils.image_from_tensor(self.output.data[0]),\n 'target': utils.image_from_tensor(self.target[0])}", "def mapped_reconstructed_data_dict(\r\n self,\r\n ) -> Dict[LinearObj, Visibilities]:\r\n mapped_reconstructed_data_dict = {}\r\n\r\n image_dict = self.mapped_reconstructed_image_dict\r\n\r\n for linear_obj in self.linear_obj_list:\r\n visibilities = self.transformer.visibilities_from(\r\n image=image_dict[linear_obj]\r\n )\r\n\r\n visibilities = Visibilities(visibilities=visibilities)\r\n\r\n mapped_reconstructed_data_dict[linear_obj] = visibilities\r\n\r\n return mapped_reconstructed_data_dict", "def open_image_and_meta(image_bytes):\n with MemoryFile(image_bytes) as memfile:\n with memfile.open() as src:\n meta = src.meta\n arr = reshape_as_image(src.read())\n return arr, meta", "def getAllForImages(self):\n imageDict = {}\n for id, name in self.getAll().items():\n imageDict[id] = {}\n imageDict[id][\"name\"] = name\n imageDict[id][\"filename\"] = \"The_Steamer_Great_Western_small.jpg\"\n\n return imageDict", "def _get_image_info(\n image_id: int,\n width: int,\n height: int,\n file_name: str,\n license_id=1,\n flickr_url=\"\",\n coco_url=\"\",\n date_captured=datetime.datetime.utcnow().isoformat(' ')):\n image_info = {\n \"id\": image_id,\n \"width\": width,\n \"height\": height,\n \"file_name\": file_name,\n \"license\": license_id,\n \"flickr_url\": flickr_url,\n \"coco_url\": coco_url,\n \"date_captured\": date_captured,\n }\n\n return image_info", "def _load_components(self):\n compsf = self._fetch_components_file()\n comps_img = niimg.load_img(compsf)\n return comps_img", "def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict", "def preprocess_graph(self):\n image = tf.placeholder(\n tf.float32,\n shape=[self.img_h, self.img_w, self.col_channels])\n patches = self.create_patches(image)\n return {'image': image,\n 'patches': patches}", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def transform(data):\n if 'name' not in data or 'engine' not in data:\n return None\n return Image(\n name=data['name'],\n engine=data['engine'],\n id=data['id'] if 'id' in data else '',\n parameters=data['parameters'] if 'parameters' in data else {},\n capabilities=data['capabilities'] if 'capabilities' in data else {},\n node=data['node'] if 'node' in data else {}\n )", "def _getAttributes(self):\n self._params = {}\n if self.interp is not None:\n # Initialize interpolation function :\n self['x'] = np.arange(0, self.pixels, 1)\n self['y'] = np.arange(0, self.pixels, 1)\n # Define newaxis :\n self['xnew'] = np.arange(0, self.pixels, self.interp)\n self['ynew'] = np.arange(0, self.pixels, self.interp)\n self['csize'] = len(self['xnew'])\n else:\n self['csize'] = self.pixels\n # Variables :\n l = int(self['csize'] / 2)\n self['l'] = 
l\n y, x = np.ogrid[-l:l, -l:l]\n disc = x**2 + y**2\n self['mask'] = disc < l**2\n self['nmask'] = np.invert(self['mask'])\n # self['image'] = np.tile(self.bgcolor[np.newaxis, ...], (2*l, 2*l, 1))", "def prepare_data(data):\n\n image_array = np.zeros(shape=(len(data), 48, 48))\n image_label = np.array(list(map(int, data['emotion'])))\n\n for i, row in enumerate(data.index):\n image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')\n image = np.reshape(image, (48, 48))\n\n image = face_detection(image.astype(np.uint8))\n\n image_array[i] = image\n\n return image_array, image_label", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 
'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def read(self):\n self._sync()\n d = {tag: struct.unpack('<f', self.pic.read(4))[0] for tag in tags}\n d['ts_pic'] = struct.unpack('<i', self.pic.read(4))[0]\n return d", "def make_image(self, **kwargs):\n image = dict(self.BASE_EMR_IMAGE, **kwargs)\n\n return {k: v for k, v in image.items() if v is not None}", "def unpack_data(imagefile, labelfile):\n\t# Open the images with gzip in read binary mode\n\timages = open(imagefile, 'rb')\n\tlabels = open(labelfile, 'rb')\n\t# Read the binary data\n\t# We have to get big endian unsigned int. So we need '>I'\n\t# Get metadata for images\n\timages.read(4) # skip the magic_number\n\tnumber_of_images = images.read(4)\n\tnumber_of_images = unpack('>I', number_of_images)[0]\n\trows = images.read(4)\n\trows = unpack('>I', rows)[0]\n\tcols = images.read(4)\n\tcols = unpack('>I', cols)[0]\n\n\t# Get metadata for labels\n\tlabels.read(4) # skip the magic_number\n\tN = labels.read(4)\n\tN = unpack('>I', N)[0]\n\n\tif number_of_images != N:\n\t\traise Exception('number of labels did not match the number of images')\n\t# Get the data\n\tx = zeros((N, rows, cols), dtype=float32) # Initialize numpy array\n\ty = zeros((N, 1), dtype=uint8) # Initialize numpy array\n\tfor i in range(N):\n\t\tif i % 1000 == 0:\n\t\t\tprint(\"i: %i\" % i)\n\t\tfor row in range(rows):\n\t\t\tfor col in range(cols):\n\t\t\t\ttmp_pixel = images.read(1) # Just a single byte\n\t\t\t\ttmp_pixel = unpack('>B', tmp_pixel)[0]\n\t\t\t\tx[i][row][col] = tmp_pixel\n\t\ttmp_label = labels.read(1)\n\t\ty[i] = unpack('>B', tmp_label)[0]\n\treturn x, y", "def _build_final_image(self, image):\n raise NotImplementedError", "def data(self):\n return self.image", "def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = 
data[5] # PROHIBITORY, WARNING, OTHER, INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict", "def __getstate__(self):\n\t\tdct = self.__dict__.copy()\n\t\t# Can't pickle ImageCore objects - convert to string\n\t\tdel dct['image']\n\t\treturn dct", "def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! 
\n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)", "def get_exif_data(image):\n exif_data = {}\n info = image._getexif()\n if info:\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n exif_data[decoded] = gps_data\n else:\n exif_data[decoded] = value\n\n return exif_data", "def galaxy_model_image_dict(self) -> {g.Galaxy: np.ndarray}:\r\n galaxy_model_image_dict = self.tracer.galaxy_image_dict_from_grid(\r\n grid=self.grid\r\n )\r\n\r\n for path, image in galaxy_model_image_dict.items():\r\n galaxy_model_image_dict[path] = image.binned\r\n\r\n # TODO : Extend to multiple inversioons across Planes\r\n\r\n for plane_index in self.tracer.plane_indexes_with_pixelizations:\r\n\r\n galaxy_model_image_dict.update(\r\n {\r\n self.tracer.planes[plane_index].galaxies[\r\n 0\r\n ]: self.inversion.mapped_reconstructed_image\r\n }\r\n )\r\n\r\n return galaxy_model_image_dict", "def pics_dict(self):\n\n img_dict = {}\n\n for name, path in zip(ICON_NAMES,ICON_PATHS):\n\n if name == \"main_icon\":\n tk_pic = cGUIf.get_TkImage(path,32,32)\n\n else:\n tk_pic = cGUIf.get_TkImage(path,64,64)\n \n img_dict.update({name : tk_pic})\n\n return img_dict", "def make_image_data(image_filenames):\n imgdict = make_image_data_list(image_filenames)\n return json.dumps({\"requests\": imgdict }).encode()", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def build_img_dict(dataset):\n imgs_dict = {i:dict(imgs=[], index=[]) for i in range(10)}\n for i, (img, t) in enumerate(zip(dataset.data, dataset.targets)):\n imgs_dict[t.item()][\"imgs\"].append(img.float())\n imgs_dict[t.item()][\"index\"].append(i)\n\n for i in range(10):\n imgs_dict[i][\"imgs\"] = torch.stack(imgs_dict[i][\"imgs\"])\n imgs_dict[i][\"index\"] = torch.LongTensor(imgs_dict[i][\"index\"])\n return imgs_dict", "def _get_data(self, image_name, one_hot=True, preproc_type=-1):\n preproc_type = self.config['preprocessing']['type'] \\\n if preproc_type is -1 else preproc_type\n\n filetype = {'rgb': 'png', 'depth': 'png', 'labels': 'npy'}\n rgb_filename, depth_filename, groundtruth_filename = (\n path.join(self.base_path, '{}/Stereo_Right/Omni_{}/{}.{}'\n .format(pref, self.config['direction'],\n image_name, filetype[modality]))\n for pref, modality in zip(['RGB', 'Depth', 'GT/LABELS_NPY'],\n ['rgb', 'depth', 'labels']))\n\n blob = {}\n blob['rgb'] = cv2.imread(rgb_filename)\n blob['depth'] = cv2.imread(depth_filename, cv2.IMREAD_ANYDEPTH)\n blob['labels'] = np.load(groundtruth_filename)\n\n if preproc_type == 'online':\n scale = self.config['preprocessing'].get('scale')\n crop = self.config['preprocessing'].get('crop')\n hflip = self.config['preprocessing'].get('hflip')\n vflip = self.config['preprocessing'].get('vflip')\n gamma = self.config['preprocessing'].get('gamma')\n\n if scale and crop:\n h, w, _ = blob['rgb'].shape\n min_scale = crop / float(min(h, w))\n k = random.uniform(max(min_scale, scale[0]), scale[1])\n blob['rgb'] = cv2.resize(blob['rgb'], None, fx=k, fy=k)\n blob['depth'] = cv2.resize(blob['depth'], None, fx=k, fy=k,\n interpolation=cv2.INTER_NEAREST)\n blob['labels'] = cv2.resize(blob['labels'], None, fx=k, fy=k,\n interpolation=cv2.INTER_NEAREST)\n\n if crop:\n h, w, _ = blob['rgb'].shape\n h_c = random.randint(0, h - crop)\n w_c = random.randint(0, w - crop)\n for m in ['rgb', 'depth', 'labels']:\n blob[m] = blob[m][h_c:h_c+crop, w_c:w_c+crop, ...]\n\n if hflip and np.random.choice([0, 1]):\n for m in ['rgb', 'depth', 'labels']:\n blob[m] = np.flip(blob[m], axis=0)\n\n if vflip and np.random.choice([0, 1]):\n for m in ['rgb', 'depth', 'labels']:\n blob[m] = np.flip(blob[m], axis=1)\n\n if gamma:\n k = random.uniform(gamma[0], gamma[1])\n lut = np.array([((i / 255.0) ** (1/k)) * 255\n for i in np.arange(0, 256)]).astype(\"uint8\")\n blob['rgb'] = lut[blob['rgb']]\n\n force_multiple = self.config['preprocessing'].get('force_multiple')\n if force_multiple:\n h, w, _ = blob['rgb'].shape\n h_c, w_c = [d - (d % force_multiple) for d in [h, w]]\n if h_c != h or w_c != w:\n for m in ['rgb', 'depth', 'labels']:\n blob[m] = blob[m][:h_c, :w_c, ...]\n\n blob['depth'] = np.expand_dims(blob['depth'], 3)\n blob['labels'] = np.asarray(self.label_lookup)[blob['labels']]\n\n if one_hot:\n blob['labels'] = np.array(self.one_hot_lookup ==\n blob['labels'][:, :, None]).astype(int)\n return blob", "def galaxy_model_image_dict(self) -> {g.Galaxy: np.ndarray}:\r\n galaxy_model_image_dict = self.plane.galaxy_image_dict_from_grid(grid=self.grid)\r\n\r\n for path, image in galaxy_model_image_dict.items():\r\n 
galaxy_model_image_dict[path] = image.binned\r\n\r\n # TODO : Extend to multiple inversioons across Planes\r\n\r\n for galaxy in self.galaxies:\r\n\r\n if galaxy.has_pixelization:\r\n\r\n galaxy_model_image_dict.update(\r\n {galaxy: self.inversion.mapped_reconstructed_image}\r\n )\r\n\r\n return galaxy_model_image_dict", "def build_img_info(img_root):\n imgs = []\n feats = []\n K = []\n for i, name in enumerate(os.listdir(img_root)):\n if '.jpg' in name or '.JPG' in name:\n path = os.path.join(img_root, name)\n img = cv2.imread(path)\n imgs.append(img)\n feature_process = FeatureProcess(img)\n kpt, des = feature_process.extract_features()\n photo_info = PhotoExifInfo(path)\n photo_info.get_tags()\n K.append(photo_info.get_intrinsic_matrix())\n A = photo_info.get_area()\n D = photo_info.get_diam()\n feats.append({'kpt': kpt, 'des': des, 'A': A, 'D': D})\n return imgs, feats, K", "def populate_images(self):\n print \"Populating images info...\"\n images = self.get_all_images()\n for i in images:\n\n associated_snapshots = self.get_snapshots_of(i)\n\n self.spreadsheet[i.id] = dict(name=i.name, Name_tag=self.get_name_tag(i), id=i.id,\n KEEP_tag=self.get_keep_tag(i), PROD_tag=self.is_production(i),\n region=i.region.name,\n created=i.creationDate,\n associated_snapshots=associated_snapshots,\n description=i.description)", "def to_image_data(data):\n \n # removing image\n if not data:\n return u''\n\n # image path (not changed)\n if data[0:5] != u'data:':\n return None\n \n # TODO: better MIME handling\n mime = data[5:data.index(u';')].lower()\n img = data[data.index(u',') + 1:].decode('base64')\n \n return mime, img", "def _handle_image_descriptors(self):\n while self.file_content[self.data_idx] == 0x2c:\n img_left = self.file_content[self.data_idx + 1] + \\\n (self.file_content[self.data_idx + 2] << 8)\n img_top = self.file_content[self.data_idx + 3] + \\\n (self.file_content[self.data_idx + 4] << 8)\n img_width = self.file_content[self.data_idx+5] + \\\n (self.file_content[self.data_idx + 6] << 8)\n #img_height = self.file_content[self.data_idx+7] + \\\n # (self.file_content[self.data_idx + 8] << 8)\n flags = self.file_content[self.data_idx + 9]\n local_col_table_flag = (flags & 0b10000000) != 0\n #interlace_flag = (flags & 0b01000000) != 0\n self.data_idx = self.data_idx + 10\n if local_col_table_flag:\n # read local color table\n print('read local color table. 
Not implemented yet')\n\n self.lzw_min_code_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n\n pix_xix = img_left\n pix_yix = img_top\n subblock_data = []\n while self.file_content[self.data_idx] != 0:\n subblock_sz = self.file_content[self.data_idx]\n self.data_idx = self.data_idx + 1\n subblock_data += self.file_content[self.data_idx:self.data_idx + subblock_sz]\n self.data_idx = self.data_idx + subblock_sz\n self.data_idx = self.data_idx + 1\n dec_data = self.decode_subblock(subblock_data)\n for dat in dec_data:\n self.output_image[pix_xix][pix_yix][0] = self.color_table[dat][0]\n self.output_image[pix_xix][pix_yix][1] = self.color_table[dat][1]\n self.output_image[pix_xix][pix_yix][2] = self.color_table[dat][2]\n pix_xix = pix_xix + 1\n if pix_xix == img_left + img_width:\n pix_xix = img_left\n pix_yix = pix_yix + 1", "def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_b64decode(data)))", "def get_image_data():\n #mac\n #user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n #pc\n #user_images = [i.replace('static\\\\img\\\\', \"\") for i in glob.glob('static\\\\img\\\\*.png')]\n user_images = [i.replace('static/img/', \"\") for i in glob.glob('static/img/*.png')]\n sports = [inflection.titleize(i.replace('.png', \"\").capitalize().replace(\"_\", \" \")) + \"!\" for i in user_images]\n data = list(zip(sports, user_images))\n return data", "def _index_data(self, raw_data):\n self._all_class_images = collections.OrderedDict()\n self._image_embedding = collections.OrderedDict()\n for i, k in enumerate(raw_data[\"keys\"]):\n _, class_label, image_file = k.split(\"-\")\n image_file_class_label = image_file.split(\"_\")[0]\n assert class_label == image_file_class_label\n self._image_embedding[image_file] = raw_data[\"embeddings\"][i]\n if class_label not in self._all_class_images:\n self._all_class_images[class_label] = []\n self._all_class_images[class_label].append(image_file)\n\n self._check_data_index(raw_data)\n\n self._all_class_images = collections.OrderedDict([\n (k, np.array(v)) for k, v in six.iteritems(self._all_class_images)\n ])\n if self._verbose:\n tf.logging.info(str([len(raw_data), len(self._all_class_images),\n len(self._image_embedding)]))", "def encode_decode(self, img, img_metas):\n pass", "def get_features_by_image_data(image_data: np.ndarray) -> t.Dict[str, torch.Tensor]:\r\n with torch.no_grad():\r\n return _reorder_features(predictor(image_data))", "def process(self, image: np.ndarray) -> NamedTuple:\n\n return super().process(input_data={'image': image})", "def __call__(\n self, \n image: ndarray, \n adversarial_image: ndarray\n ) -> Dict[str, Union[float, int]]:\n ...", "def _load_data_worker(self,img_dir,lbl_dir):\n data = []\n\n for img,lbl in zip(glob(img_dir+\"/*.jpg\"),glob(lbl_dir+\"/*.txt\")):\n im = np.array(Image.open(img))\n im = make_square_image_with_padding(im, self.core_config.num_colors)\n lbl_fh = open(lbl,encoding='utf-8')\n\n objects = self._get_objects(lbl_fh)\n sorted_objects = sort_object_list(objects)\n object_class = self._get_object_classes(sorted_objects)\n \n image_with_objects = {\n 'img':im,\n 'objects':sorted_objects,\n 'object_class': object_class\n }\n\n image_with_mask = convert_to_mask(image_with_objects, self.core_config)\n\n data.append(image_with_mask)\n lbl_fh.close()\n\n return data", "def entity(self) -> dict: \n image = {}\n image['PartitionKey'] = self.getPartitionKey()\n 
image['RowKey'] = self.getRowKey()\n for key, value in vars(self).items():\n if not key.startswith('_') and key not in ['','PartitionKey','RowKey']:\n if type(value) in [str, int, bool, datetime.date, datetime.datetime]:\n image[key] = value \n return image", "def get_img_data(data_type, file_info, img_info, **kwargs):\n if file_info['ext']=='fits':\n hdulist = get_file(file_info)\n data = hdulist[int(img_info['frame'])].data\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img = get_file(file_info)\n data = np.array(img)\n \n if data_type == 'data':\n if 'scale' in kwargs:\n width = int(kwargs['width']/2/img_info['viewer']['scale'])\n height = int(kwargs['height']/2/img_info['viewer']['scale'])\n else:\n width = int(kwargs['width']/2)\n height = int(kwargs['height']/2)\n x0 = max(0, kwargs['x']-width)\n y0 = max(0, kwargs['y']-height)\n xf = min(data.shape[1], kwargs['x']+width)\n yf = min(data.shape[0], kwargs['y']+height)\n if 'scale' in kwargs:\n tile_data = {\n 'x0_idx': x0,\n 'y0_idx': y0,\n 'xf_idx': xf,\n 'yf_idx': yf\n }\n data = scale_data(file_info, img_info, tile_data, data)\n else:\n data = data[y0:yf, x0:xf]\n response = {\n 'id': 'data',\n 'min': float(data.min()),\n 'max': float(data.max()),\n 'mean': float(data.mean()),\n 'median': float(np.median(data)),\n 'std_dev': float(np.std(data)),\n 'data': data.tolist()\n }\n elif data_type == 'datapoint':\n if (kwargs['x']<data.shape[1] and kwargs['y']<data.shape[0] and\n kwargs['x']>=0 and kwargs['y']>=0):\n response = {\n 'id': 'datapoint',\n 'px_value': float(data[kwargs['y'],kwargs['x']])\n }\n else:\n response = {\n 'id': 'datapoint',\n 'px_value': 0\n }\n else:\n raise ToyzJobError(\"Loading that data type has not been implemented yet\")\n return response", "def _load_metadata(self):\n\n cub_dir = self.root / \"CUB_200_2011\"\n images_list: Dict[int, List] = OrderedDict()\n\n with open(str(cub_dir / \"train_test_split.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n is_train_instance = int(row[1]) == 1\n if is_train_instance == self.train:\n images_list[img_id] = []\n\n with open(str(cub_dir / \"images.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n images_list[img_id].append(row[1])\n\n with open(str(cub_dir / \"image_class_labels.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n # CUB starts counting classes from 1 ...\n images_list[img_id].append(int(row[1]) - 1)\n\n with open(str(cub_dir / \"bounding_boxes.txt\")) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\" \")\n for row in csv_reader:\n img_id = int(row[0])\n if img_id in images_list:\n box_cub = [int(float(x)) for x in row[1:]]\n box_avl = [box_cub[1], box_cub[0], box_cub[3], box_cub[2]]\n # PathsDataset accepts (top, left, height, width)\n images_list[img_id].append(box_avl)\n\n images_tuples = []\n for _, img_tuple in images_list.items():\n images_tuples.append(tuple(img_tuple))\n self._images = images_tuples # type: ignore\n\n # Integrity check\n for row_check in self._images:\n filepath = self.root / CUB200.images_folder / row_check[0]\n if not filepath.is_file():\n if self.verbose:\n print(\"[CUB200] Error checking integrity of:\", 
filepath)\n return False\n\n return True", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def get_data_structure_representation(self) -> dict:\n byte_buff = self.get_rle()\n encoding = \"RLE\"\n\n if len(byte_buff) > self.grid_size[0] * self.grid_size[1] * 4:\n encoding = \"RAW\"\n byte_buff = self.get_raw()\n print(\"RAW ran\")\n else:\n print(\"RLE ran\")\n\n json_dict = {\n \"encoding\": encoding,\n \"nodes\": [base64.b64encode(bytes(byte_buff)).decode()],\n \"dimensions\": [self.grid_size[0], self.grid_size[1]]\n }\n\n return json_dict", "def create_feed_dict(self, image):\n\n # Expand 3-dim array to 4-dim by prepending an 'empty' dimension.\n # This is because we are only feeding a single image, but the\n # Inception model was built to take multiple images as input.\n image = np.expand_dims(image, axis=0)\n\n # Create feed-dict for inputting data to TensorFlow.\n return {self.tensor_name_input_image: image}", "def data(self):\n return self._img", "def write_component_image_info_area(pldm_fw_up_pkg, metadata, image_files):\n components = metadata[\"ComponentImageInformationArea\"]\n # ComponentImageCount\n pldm_fw_up_pkg.write(struct.pack(\"<H\", len(components)))\n component_location_offsets = []\n # ComponentLocationOffset position in individual component image\n # information\n component_location_offset_pos = 12\n\n for component in components:\n # Record the location of the ComponentLocationOffset to be updated\n # after appending images to the firmware update package\n component_location_offsets.append(\n pldm_fw_up_pkg.tell() + component_location_offset_pos\n )\n\n # ComponentClassification\n component_classification = component[\"ComponentClassification\"]\n if component_classification < 0 or component_classification > 0xFFFF:\n sys.exit(\n \"ERROR: ComponentClassification should be [0x0000 - 0xFFFF]\"\n )\n\n # ComponentIdentifier\n component_identifier = component[\"ComponentIdentifier\"]\n if component_identifier < 0 or component_identifier > 0xFFFF:\n sys.exit(\"ERROR: ComponentIdentifier should be [0x0000 - 0xFFFF]\")\n\n # ComponentComparisonStamp\n component_comparison_stamp = get_component_comparison_stamp(component)\n\n # ComponentOptions\n component_options = bitarray(16, endian=\"little\")\n component_options.setall(0)\n supported_component_options = [0, 1, 2]\n for option in component[\"ComponentOptions\"]:\n if option not in supported_component_options:\n sys.exit(\n \"ERROR: unsupported ComponentOption in \"\n \" ComponentImageInformationArea section\"\n )\n component_options[option] = 1\n\n # RequestedComponentActivationMethod\n requested_component_activation_method = bitarray(16, endian=\"little\")\n 
requested_component_activation_method.setall(0)\n supported_requested_component_activation_method = [0, 1, 2, 3, 4, 5]\n for option in component[\"RequestedComponentActivationMethod\"]:\n if option not in supported_requested_component_activation_method:\n sys.exit(\n \"ERROR: unsupported RequestedComponent \"\n \" ActivationMethod entry\"\n )\n requested_component_activation_method[option] = 1\n\n # ComponentLocationOffset\n component_location_offset = 0\n # ComponentSize\n component_size = 0\n # ComponentVersionStringType\n component_version_string_type = string_types[\"ASCII\"]\n # ComponentVersionStringlength\n # ComponentVersionString\n component_version_string = component[\"ComponentVersionString\"]\n check_string_length(component_version_string)\n\n format_string = \"<HHIHHIIBB\" + str(len(component_version_string)) + \"s\"\n pldm_fw_up_pkg.write(\n struct.pack(\n format_string,\n component_classification,\n component_identifier,\n component_comparison_stamp,\n ba2int(component_options),\n ba2int(requested_component_activation_method),\n component_location_offset,\n component_size,\n component_version_string_type,\n len(component_version_string),\n component_version_string.encode(\"ascii\"),\n )\n )\n\n index = 0\n pkg_header_checksum_size = 4\n start_offset = pldm_fw_up_pkg.tell() + pkg_header_checksum_size\n # Update ComponentLocationOffset and ComponentSize for all the components\n for offset in component_location_offsets:\n file_size = os.stat(image_files[index]).st_size\n pldm_fw_up_pkg.seek(offset)\n pldm_fw_up_pkg.write(struct.pack(\"<II\", start_offset, file_size))\n start_offset += file_size\n index += 1\n pldm_fw_up_pkg.seek(0, os.SEEK_END)", "def loadImagesAvatar(self): \n dictionary = {}\n dictionary[\"body\"] = None\n dictionary[\"shoes\"] = None\n dictionary[\"shirt\"] = None\n dictionary[\"trousers\"] = None\n dictionary[\"skirt\"] = None\n dictionary[\"head\"] = None\n dictionary[\"hair\"] = None\n dictionary[\"mask\"] = None\n return dictionary", "def __call__(self, results):\r\n if isinstance(results['img'], str):\r\n results['filename'] = results['img']\r\n results['ori_filename'] = results['img']\r\n else:\r\n results['filename'] = None\r\n results['ori_filename'] = None\r\n img = mmcv.imread(results['img'])\r\n results['img'] = img\r\n results['img_fields'] = ['img']\r\n results['img_shape'] = img.shape\r\n results['ori_shape'] = img.shape\r\n return results", "def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th 
camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = 
split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)", "def _decodeMetadata(\n mdAtributesBytes, mdCalibrationBytes, mdTextBytes, size, Nrecords):\n ### TODO: probably XML or JSON decoder should work here?\n mdkeysXY = {\n 'Nx': b'\\x00W\\x00i\\x00d\\x00t\\x00h\\x00\\x00\\x00',\n 'NxBytes': b'\\x00W\\x00i\\x00d\\x00t\\x00h\\x00B\\x00y\\x00t\\x00e\\x00s\\x00\\x00\\x00',\n 'Ny': b'\\x00H\\x00e\\x00i\\x00g\\x00h\\x00t\\x00\\x00\\x00',}\n imgMD = {}\n for key, val in mdkeysXY.items():\n ind = mdAtributesBytes.index(val)\n start = ind + len(val)\n a = mdAtributesBytes[start: start + 2]\n imgMD[key] = frombuffer(a, 'int16')[0]\n mdkeysZ = {\n 'dxy': b'\\rd\\x00C\\x00a\\x00l\\x00i\\x00b\\x00r\\x00a\\x00t\\x00i\\x00o\\x00n\\x00\\x00\\x00',\n }\n for key, val in mdkeysZ.items():\n ind = mdCalibrationBytes.index(val)\n start = ind + len(val)\n if key == 'dxy':\n a = mdCalibrationBytes[start: start + 8]\n imgMD[key] = frombuffer(a, 'float64')[0]\n mdkeysText = {\n 'Nt': b'\\x00T\\x00i\\x00m\\x00e\\x00 \\x00L\\x00o\\x00o\\x00p\\x00:\\x00 '}\n ind = mdTextBytes.index(\n b'\\x00M\\x00e\\x00t\\x00a\\x00d\\x00a\\x00t\\x00a\\x00:')\n metadataText = mdTextBytes[ind:][1::2]\n ind = metadataText.index(b'\\x00\\x08')\n metadataText = metadataText[:ind]\n lines = metadataText.split(b'\\r\\n')\n imgMD['dz'] = 1.0\n for n, line in enumerate(lines):\n if b'Z Stack Loop:' in line and b'- Step:' in lines[n+1]:\n sline = lines[n+1].split(b' ')\n imgMD['dz'] = float64(sline[2])\n imgMD['dz units'] = sline[3]\n ind = mdTextBytes.index(mdkeysText['Nt'])\n di = len(mdkeysText['Nt'])\n val = mdTextBytes[ind + di: ind + di + 8][1::2].split(b'\\r')[0]\n imgMD['Nt'] = int(val)\n imgMD['Nz'] = int(Nrecords/imgMD['Nt'])\n imgMD['raw'] = metadataText\n imgMD['fileSize'] = size\n return imgMD", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = 
get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def get_image_data(img):\n\tfrom EMAN2 import EMNumPy\n\treturn EMNumPy.em2numpy(img)", "def process_image(self):\n pass", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def prepare_data(rawimage, rawlabel, mapping, params):\n # rawimage: TF tensor: H x W x 3, tf.uint8\n # rawlabel: TF tensor: H x W, tf.uint8/16, [0,tf.uint8/16-1]\n # images: TF tensor: Nb x hf x wf x 3, tf.float32 in [0,1)\n # labels: TF tensor: Nb x hf x wf (in case of upsampling), tf.int32, [0, Nclasses] (in case of extra void class)\n\n image = tf.image.convert_image_dtype(rawimage, dtype=tf.float32)\n # resize to learnable system's dimensions\n image = tf.image.resize_images(image, [params.height_network, 
params.width_network])\n\n label_for_resize = tf.to_int32(rawlabel[tf.newaxis, ..., tf.newaxis])\n label = tf.image.resize_nearest_neighbor(label_for_resize, [params.height_network, params.width_network])\n label = tf.squeeze(label, axis=[0, 3])\n\n label = _lids2cids(mapping, label)\n\n return image, label", "def __init__(self, imagefile, labelfile,\n *, training=60, validation=15, test=25, seed=None):\n if training + validation + test != 100:\n raise ValueError(\"Sum of data sets is not 100\")\n images = load_files(imagefile, labelfile)\n\n\n bynum = {i: tuple(imgs) for i, imgs in groupby(images, key=getter('of'))}\n\n train = []\n valid = []\n test = []\n testids = []\n stats = {}\n for num, nimages in bynum.items():\n r = round(len(nimages) * training // 100)\n v = round(len(nimages) * validation // 100)\n train += nimages[:r]\n valid += nimages[r:r+v]\n test += nimages[r+v:]\n stats[num] = MiniData(r, v, len(nimages)-r-v)\n # Now train, valid, and test are the data, ordered by number\n\n # This is the best time to convert the lists into dicts, if you want to.\n\n \"\"\"\n I want to support these access methods:\n Get by number : int -> ImmutableCollection[Image]\n -- Get by set : (set ->) ImmutableCollection[Image]\n Get set as dict: set -> { int: Image }\n Get by both : set -> int -> ImmutableCollection[Image]\n Get as dict : -> { set: { int: Image } }\n -- Get as set\n \"\"\"\n\n data = train + valid + test\n Data._randomizer.seed(Data._default_seed if seed is None else seed)\n Data._randomizer.shuffle(train)\n Data._randomizer.shuffle(valid)\n Data._randomizer.shuffle(test)\n Data._randomizer.shuffle(data)\n self._training = tuple(train)\n self._validation = tuple(valid)\n self._test = tuple(test)\n self._alldata = tuple(data)\n\n self.stats = stats", "def generate_image_info(im, params):\n im = ee.Image(im)\n\n # some images are scaled to a factor of 10.\n if params.get('scale') == 'log':\n im = im.log10()\n\n im = im.sldStyle(params.get('sld_style'))\n\n m = im.getMapId()\n\n mapid = m.get('mapid')\n token = m.get('token')\n\n url = 'https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}'.format(\n mapid=mapid,\n token=token\n )\n\n result = {\n 'mapid': mapid,\n 'token': token,\n 'url': url\n }\n return result" ]
[ "0.65801144", "0.6529789", "0.6423225", "0.63869387", "0.62811023", "0.6264642", "0.62389845", "0.6179889", "0.61555016", "0.61398166", "0.6100159", "0.6036023", "0.59768283", "0.59654045", "0.59536004", "0.59490883", "0.593953", "0.5929546", "0.59160763", "0.5884778", "0.58323", "0.58095205", "0.57822526", "0.5772493", "0.5758321", "0.5740606", "0.57184106", "0.5713239", "0.5688035", "0.5688035", "0.56761664", "0.5669157", "0.56581074", "0.565724", "0.56167537", "0.558184", "0.5562967", "0.5550756", "0.5550202", "0.5544905", "0.5533207", "0.5515874", "0.5511249", "0.5496764", "0.54876095", "0.5482801", "0.54797596", "0.5455056", "0.5452273", "0.54444474", "0.543992", "0.5433939", "0.5418186", "0.54177755", "0.53999466", "0.5397774", "0.5378563", "0.53711843", "0.53686684", "0.5366638", "0.5365637", "0.5362364", "0.5357577", "0.53558815", "0.53558815", "0.53468734", "0.5346588", "0.5340622", "0.53383994", "0.5336041", "0.53334653", "0.5318535", "0.53164655", "0.5311512", "0.53062433", "0.5305549", "0.52978134", "0.5292415", "0.528252", "0.5281412", "0.527506", "0.52733946", "0.52724767", "0.52692646", "0.52641964", "0.5261454", "0.5244906", "0.5244564", "0.5209254", "0.5204361", "0.5202012", "0.51993567", "0.51982874", "0.5194927", "0.5194175", "0.51940775", "0.5188211", "0.5178007", "0.5177199", "0.5174918", "0.5174783" ]
0.0
-1
Collects image data via appropriate protocol and returns time and data.
def poll(self) -> Tuple[np.ndarray]:
    t = time.time()
    try:
        v = self.controller.get(self.pvname)
    except TimeoutError:
        print(f"No process variable found for {self.pvname}")
        v = DEFAULT_SCALAR_VALUE[self.pvname]
    self.time = np.append(self.time, t)
    self.data = np.append(self.data, v)
    return self.time - self.tstart, self.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def image_fetcher(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def get_image_data(imagedir, model_kwds=dict(layer='fc2'),\n img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),\n pca_kwds=None):\n fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')\n images_fn = pj(imagedir, ic_base_dir, 'images.pk')\n if os.path.exists(images_fn):\n print(f\"reading image arrays {images_fn} ...\")\n images = read_pk(images_fn)\n else:\n print(f\"create image arrays {images_fn}\")\n images = read_images(imagedir, **img_kwds)\n write_pk(images, images_fn)\n if os.path.exists(fingerprints_fn):\n print(f\"reading fingerprints {fingerprints_fn} ...\")\n fingerprints = read_pk(fingerprints_fn)\n else:\n print(f\"create fingerprints {fingerprints_fn}\")\n fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))\n if pca_kwds is not None:\n fingerprints = ic.pca(fingerprints, **pca_kwds)\n write_pk(fingerprints, fingerprints_fn)\n print(f\"reading timestamps ...\")\n if timestamps_kwds is not None:\n timestamps = read_timestamps(imagedir, **timestamps_kwds)\n return images, fingerprints, timestamps", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def get_img_data(data_type, file_info, img_info, **kwargs):\n if file_info['ext']=='fits':\n hdulist = get_file(file_info)\n data = hdulist[int(img_info['frame'])].data\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img = get_file(file_info)\n data = np.array(img)\n \n if data_type == 'data':\n if 'scale' in kwargs:\n width = int(kwargs['width']/2/img_info['viewer']['scale'])\n height = int(kwargs['height']/2/img_info['viewer']['scale'])\n else:\n width = int(kwargs['width']/2)\n height = int(kwargs['height']/2)\n x0 = max(0, kwargs['x']-width)\n y0 = max(0, kwargs['y']-height)\n xf = min(data.shape[1], kwargs['x']+width)\n yf = min(data.shape[0], kwargs['y']+height)\n if 'scale' in kwargs:\n tile_data = {\n 'x0_idx': x0,\n 'y0_idx': y0,\n 'xf_idx': xf,\n 'yf_idx': yf\n }\n data = scale_data(file_info, img_info, tile_data, data)\n else:\n data = data[y0:yf, x0:xf]\n response = {\n 'id': 'data',\n 'min': float(data.min()),\n 'max': float(data.max()),\n 'mean': float(data.mean()),\n 'median': float(np.median(data)),\n 'std_dev': float(np.std(data)),\n 'data': data.tolist()\n }\n elif data_type == 'datapoint':\n if (kwargs['x']<data.shape[1] and kwargs['y']<data.shape[0] and\n kwargs['x']>=0 and kwargs['y']>=0):\n response = {\n 'id': 'datapoint',\n 'px_value': float(data[kwargs['y'],kwargs['x']])\n }\n else:\n response = {\n 'id': 'datapoint',\n 'px_value': 0\n }\n else:\n raise ToyzJobError(\"Loading that data type has not been implemented yet\")\n return response", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except 
o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def data(self):\n return self.image", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)", "def image_fetcher_depricated(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/image/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def __get_image(self, source):\n if not source in self.__video_modules:\n return (None, None)\n with self.__video_locks[source]:\n last_time, last_img = self.__last_images[source]\n age = time.time() - last_time\n if age > 0.05:\n new_image = self.__video_modules[source].get_image()\n try:\n new_time = self.__video_modules[source].get_time()\n print \"Got time from ros: %f\" % new_time\n except:\n new_time = time.time()\n\n if new_image:\n last_time = new_time \n last_img = new_image\n self.__last_images[source] = (new_time, new_image)\n return (last_time, last_img)", "def get(self):\n\t\tif not self.threaded:\n\t\t\tself.record()\n\t\timg = self.Video[-1]\n\t\ttime = self.timestamps[-1]\n\t\tif self.newAvailable:\n\t\t\tnew = True\n\t\t\tself.newAvailable = False\n\t\t\treturn new, img, time\n\t\telse:\n\t\t\tnew = False\n\t\t\treturn new, img, time", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n 
self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def data(self):\n return self._img", "def receive_message(self, msg):\n # TODO(eric.cousineau): Consider moving decode logic.\n with self.lock:\n self.utime = msg.header.utime\n self._image = decode_lcmt_image(msg, self._image)\n self._is_depth_image = (msg.pixel_format\n == lcmt_image.PIXEL_FORMAT_DEPTH)", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def read(self):\n self._sync()\n d = {tag: struct.unpack('<f', self.pic.read(4))[0] for tag in tags}\n d['ts_pic'] = struct.unpack('<i', self.pic.read(4))[0]\n return d", "def start(self) -> None:\n data = 
b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def process(self, image):", "def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()", "def get_data(self, t: int, **kwargs) -> Image:\n return self._get_single_frame(int(self._resolve_index(t)), **kwargs)", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points 
in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel + 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''", "def retrieveImageInfo(self, filename):\t\t \n\t\tassert filename, \"Filename must be defined\"\n\t\tassert os.path.exists(filename), \"File that we're retrieving information \\\n\t\t\t\t\t\t\t\t\t\tfrom (%s) needs to exist, but doesn't.\" % filename\n\t\tself.ext = filename.split(\".\")[-1].lower()\n\t\trdr = self.getReaderByExtension(self.ext)\n\t\t\n\t\tif self.ext == \"bmp\":\n\t\t\trdr.Allow8BitBMPOn()\n\t\trdr.SetFileName(filename)\n\t\tif rdr.IsA(\"vtkExtTIFFReader\"):\n\t\t\trdr.UpdateInformation()\n\t\t\tif rdr.GetNumberOfScalarComponents() == 1:\n\t\t\t\trdr.RawModeOn()\n\n\t\tdata = rdr.GetOutput()\n\t\tdata.Update()\n\t\tself.numberOfComponents = data.GetNumberOfScalarComponents()\n\n\t\tif not self.ctf:\n\t\t\tbd = self.getDataBitDepth(data)\n\t\t\tself.ctf = vtk.vtkColorTransferFunction()\n\t\t\tif bd == 8 or bd == 12:\n\t\t\t\tself.ctf.AddRGBPoint(0, 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint((2 ** bd) - 1, 0, 1, 0)\n\t\t\telse:\n\t\t\t\trange = data.GetScalarRange()\n\t\t\t\tself.ctf.AddRGBPoint(range[0], 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint(range[1], 0, 1, 0)\n\t\t\t\n\t\tself.x, self.y, z = data.GetDimensions()\n\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\tif z > 1:\n\t\t\tself.slicesPerTimepoint = z\n\t\t\tself.z = z\n\t\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\t\tlib.messenger.send(self, \"update_dimensions\")\n\t\tself.originalDimensions = self.dimensions", "def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass", "def parse_image(self, image):\n # parse the image data into a pygame surface for display or screenshot\n # raw image is BGRA\n # if image_type is segmentation, here will convert to the pre-defined color\n image.convert(self.image_type)\n\n array = np.frombuffer(image.raw_data, 
dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1] # BGR -> RGB\n self.rgb_image = array\n self.pygame_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n\n self.last_image_seconds = image.timestamp\n self.last_image_frame_num = image.frame", "def image_data(verbose=False):\n # This is a principled use of the `global` statement; don't lint me.\n global _IMAGE_DATA # pylint: disable=global-statement\n if _IMAGE_DATA is None:\n if verbose:\n logger.info(\"--- Downloading image.\")\n with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:\n _IMAGE_DATA = infile.read()\n return _IMAGE_DATA", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... \\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... 
done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... \\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def get_data(self):\n return {\"imgID\": self.image_id}", "def getImageStats(self, open_url):\n ret_image_info = None\n if \"image\" in open_url.headers.get(\"content-type\"):\n ret_image_info = self.ImageInfo()\n\n ret_image_info.size = open_url.headers.get(\"content-length\") or None\n if ret_image_info.size:\n ret_image_info.size = int(ret_image_info.size)\n self.getDataFromImage(open_url, ret_image_info)\n\n return ret_image_info", "def retrieve_data(vx_handle, i_bytes_captured, i_wait_count, s_channels):\n i_bytes_remaining = min(i_bytes_captured, i_wait_count * 4 * len(s_channels))\n i_block_offset = 0\n f_data = []\n i_retries = 0\n while i_bytes_remaining > 0:\n i_block_cnt = min(64, int(math.ceil(i_bytes_remaining / 1024.0)))\n vx_handle.write('CAPTUREGET? %d, %d'%(i_block_offset, i_block_cnt))\n buf = vx_handle.read_raw() # read whatever dut sends\n if not buf:\n print('empty response from dut for block %d'%i_block_offset)\n i_retries += 1\n if i_retries > 5:\n print('\\n\\n**** TOO MANY RETRIES ATTEMPTING TO GET DATA! 
****')\n if not i_block_offset:\n print('**** NO DATA RETUNED ****\\n')\n sys.exit(-1)\n\n # binary block CAPTUREGET returns #nccccxxxxxxx...\n # with little-endian float x bytes see manual page 139\n # if b_show_debug:\n # print(' '.join(['%02X'%ord(x) for x in buf[:6]]))\n # print(str_blocks_hex(buf[6:262]))\n\n raw_data = buf[2 + int(buf[1]):]\n i_bytes_to_convert = min(i_bytes_remaining, len(raw_data))\n # convert to floats\n f_block_data = list(unpack_from('<%df'%(i_bytes_to_convert/4), raw_data))\n # if b_show_debug:\n # print(len(f_block_data), 'floats received')\n # print(str_blocks_float(f_block_data))\n f_data += f_block_data\n i_block_offset += i_block_cnt\n i_bytes_remaining -= i_block_cnt * 1024\n return f_data", "async def async_fetch_image_data(self, image_name, username, password):\n params = {}\n cookies = self.get_session_cookie()\n if username is not None and password is not None:\n params['user'] = self.encode_user(username, password)\n else:\n params['user'] = ''\n async with aiohttp.ClientSession(cookies=cookies) as session:\n resp = await session.get(\n '{}/{}.jpg'.format(self._base_url, image_name),\n params=params\n )\n if resp.headers['Content-Type'] == 'image/jpeg':\n data = await resp.read()\n else:\n data = None\n return data", "def process_images(img_xy, img_z):\n logging.info(\"paired {} and {}\".format(img_xy.ts, img_z.ts))\n for item in xy_imgs:\n assert(item.ts >= img_xy.ts)\n for item in z_imgs:\n assert(item.ts >= img_z.ts)\n\n xy_data = np.asarray(img_xy.data, dtype='uint8')\n z_data = np.asarray(img_z.data, dtype='uint8')\n\n xy_tracker.run_tracking(xy_data)\n z_tracker.run_tracking(z_data)\n\n try:\n x, y1 = xy_tracker.get_avg().astype(float)\n z, y2 = z_tracker.get_avg().astype(float)\n msg = dict(x=x, y=y1, z=z)\n msg = json.dumps(msg)\n send_socket_msg(msg)\n except Exception:\n pass", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def process_download_other(self, data, meta_file_name, connection_time):\n block_size = 1024\n # content-length in bytes\n self.data_len = float(data.info().get('Content-length', None))\n config_pytomo.LOG.debug('Content-length: %s' % self.data_len)\n meta_file = open(meta_file_name, 'ab+')\n tries = 0\n self._total_bytes = 0\n self.state = INITIAL_BUFFERING_STATE\n start = time.time()\n while True:\n # Download and write\n before = time.time()\n if (before - start) > self.download_time:\n config_pytomo.LOG.debug('\\nDownloaded %i seconds from video'\n 'stopping' % (before - start))\n break\n # read in bytes\n data_block = data.read(block_size)\n if not self.time_to_get_first_byte:\n first_byte_time = time.time()\n self.time_to_get_first_byte = first_byte_time - connection_time\n if (not self.encoding_rate\n and tries <= config_pytomo.MAX_NB_TRIES_ENCODING):\n self.compute_encoding_rate(meta_file_name)\n tries += 
1\n write_no_seek(meta_file, data_block)\n data_block_len = len(data_block)\n #config_pytomo.LOG.debug('\\ndata_block_len=%s' % data_block_len)\n if data_block_len == 0:\n config_pytomo.LOG.debug('\\nDowloaded complete video')\n break\n self._total_bytes += data_block_len\n self.update_without_tags()\n after = time.time()\n if not self.data_duration:\n try:\n self.data_duration = get_data_duration(meta_file_name)\n except ParseError, mes:\n config_pytomo.LOG.info('no data duration: %s' % mes)\n self.current_time = after - start\n time_difference = after - before\n self.update_state(time_difference)\n block_size = self.best_block_size(time_difference, data_block_len)\n instant_thp = (8e-3 * data_block_len / (time_difference)\n if (time_difference) != 0 else None)\n #config_pytomo.LOG.debug('max_instant_thp=%skb/s; instant_thp=%skb/s'\n # % (self.max_instant_thp, instant_thp))\n if time_difference > MAX_TH_MIN_UPDATE_TIME:\n self.max_instant_thp = max(self.max_instant_thp, instant_thp)\n if config_pytomo.LOG_LEVEL == config_pytomo.DEBUG:\n # Progress message\n progress_stats = {\n 'percent_str': self.calc_percent(self._total_bytes,\n self.data_len),\n 'data_len_str': self.format_bytes(self.data_len),\n 'eta_str': self.calc_eta(start, time.time(), self.data_len,\n self._total_bytes),\n 'speed_str': self.calc_speed(start, time.time(),\n self._total_bytes),\n # in order to avoid None convertion to float in\n # report_progress and still have information\n 'instant_thp': str(instant_thp),\n 'byte_counter': self._total_bytes,\n 'current_buffer': self.current_buffer,\n }\n self.report_progress(progress_stats)\n return after - start", "def process_image(self):\n pass", "def data(self) -> List[JpegImageFile]:\n return self._data", "def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n 
new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)", "def get_timepix_data_object(evt, src):\n o = evt.get(_psana.Timepix.DataV2, src)\n if o is not None: return o\n\n o = evt.get(_psana.Timepix.DataV1, src)\n if o is not None: return o\n\n return None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False", "def extract_date_info(object_key):\n pacific = pytz.timezone('America/Los_Angeles')\n first_parts = object_key.split(\"/\")\n capture_type = first_parts[4]\n last_part_idx = len(first_parts) - 1\n file_name = first_parts[last_part_idx]\n\n # now parse the date and time out of the file name\n second_parts = file_name.split(\"_\")\n last_part_idx = len(second_parts) - 1\n if capture_type == 'snap':\n date_time_string = second_parts[last_part_idx]\n if date_time_string.endswith('.jpg'):\n date_time_string = date_time_string[:-4]\n # FIN\n final_parts = date_time_string.split(\"-\")\n date_part = final_parts[0]\n time_part = final_parts[1]\n\n # FIN\n # FIN\n if capture_type == 'record':\n time_part = second_parts[last_part_idx]\n date_part = second_parts[(last_part_idx - 1)]\n if time_part.endswith('.mp4'):\n time_part = time_part[:-4]\n # FIN\n\n # parse out our date\n year = date_part[:4]\n date_part = date_part[4:]\n month = date_part[:2]\n day = date_part[2:]\n\n # parse out the time\n hour = time_part[:2]\n time_part = time_part[2:]\n seconds = time_part[2:]\n minutes = time_part[:2]\n\n if hour[:1] == '0':\n hour = hour[1:]\n if month[:1] == '0':\n month = month[1:]\n if day[:1] == '0':\n day = 
day[1:]\n\n this_date = datetime.datetime(int(year), int(month), int(day), int(hour),\n int(minutes), int(seconds), 0, pacific)\n return_object = {'isodate': this_date.isoformat(),\n 'year': year,\n 'month': month,\n 'day': day,\n 'hour': hour,\n 'minutes': minutes,\n 'seconds': seconds}\n return return_object", "def _pngdata(self, task, c, imgdata):\n ctype = c.getinfo(pycurl.CONTENT_TYPE)\n if not (ctype and ctype.startswith(\"image/\")):\n cherrypy.log(\"SCRAPER ERROR %s content type '%s' not an image, headers %s\" %\n (c.getinfo(pycurl.EFFECTIVE_URL), ctype, c.headers))\n return None\n elif ctype != 'image/png':\n debug(self._ID, 3, \"%s: converting image %s to png\", task.key, ctype)\n png = StringIO()\n PILImage.open(StringIO(imgdata)).save(png, \"PNG\")\n imgdata = png.getvalue()\n png.close()\n return imgdata", "def convert_timestamp_info(data):\n videos = data.get('video_files', [])\n images = data.get('image_files', [])\n\n # judge the exits of video and images\n upload_path = current_app.config['UPLOAD_FOLDER']\n storage_path = current_app.config['FILE_STORAGE_PATH']\n title = data.get('title')\n storage_dir = os.path.join(storage_path, title)\n\n pathlib.Path(storage_dir).mkdir(parents=True, exist_ok=True)\n\n for video in videos:\n video_name = video.get('name')\n video_upload_path = os.path.join(upload_path, video.get('num'))\n video_storage_path = os.path.join(storage_dir, video_name)\n shutil.move(video_upload_path, video_storage_path)\n video['file_path'] = os.path.join(title, video_name)\n del video['num']\n\n for image in images:\n image_name = image.get('name')\n image_upload_path = os.path.join(upload_path, image.get('num'))\n image_storage_path = os.path.join(storage_dir, image_name)\n shutil.move(image_upload_path, image_storage_path)\n image['file_path'] = os.path.join(title, image_name)\n del image['num']\n\n return data", "def receive_data(self):\n chunks = []\n bytes_recd = 0\n while bytes_recd < 8:\n #I'm reading my data in byte chunks\n try:\n chunk = self.sockfd.recv(min(8 - bytes_recd, 4))\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n except:\n print(f'{self.ip} socket failed')\n break\n # if chunk == '':\n # raise RuntimeError(\"Socket connection broken\")\n\n stat_tuple = struct.unpack('L', chunks[0])\n data_tuple = struct.unpack('L', chunks[1])\n stat = stat_tuple[0]\n data = data_tuple[0]\n return stat, chunks[1]", "def timeCalc(image):\n telheader = astropy.io.fits.open(image)\n UT = telheader[0].header['UT']\n secs = float(UT[6:10])\n mins = float(UT[3:5])\n hours = float(UT[0:2])\n time = secs+mins*60.+hours*(60.*60.)\n\n return time", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = 
dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def recv_img(self, filename):\n recv_data = b'' # packet of byte string data\n save_data = b'' # data to be saved to file\n img_not_recvd = True # flag to indicate if image has been recieved\n exp_seq = 0 # expected sequence number initially 0\n pkt = Packet()\n\n # get image data from client until all data received\n while True:\n try:\n if img_not_recvd:\n print(\"Client: Ready to receive image\", flush=True)\n # start = time()\n recv_data = self.client_socket.recv(self.pkt_size)\n\n pkt.pkt_unpack(recv_data)\n if pkt.seq_num != exp_seq or pkt.csum != pkt.checksum(pkt.seq_num, pkt.data):\n ack = Packet(exp_seq ^ 1, \"ACK\")\n else:\n save_data += pkt.data\n ack = Packet(exp_seq, \"ACK\")\n exp_seq ^= 1\n\n ack_pack = ack.pkt_pack()\n self.client_socket.sendto(ack_pack, self.server_addr)\n\n if img_not_recvd:\n img_not_recvd = False # img data began streaming if it reaches this point\n\n except socket.timeout:\n # if image not recieved yet, keep waiting\n if img_not_recvd:\n pass\n # image has been recieved\n else:\n # write data into a file\n # end = time()\n # print(\"Client: Time to receive image:\", end - start - 2)\n with open(filename, 'wb+') as server_img:\n server_img.write(save_data)\n print(\"Client: Received and saved image\", flush=True)\n break # exit loop", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 
'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def drag_data_received(self, widget, context, x, y, sel_data, info, time):\n if not sel_data:\n return\n #modern file managers provide URI_LIST. 
For Windows split sel_data.data\n files = sel_data.get_uris()\n for file in files:\n if win():\n clean_string = conv_to_unicode(\n file.replace('\\0',' ').replace(\"\\r\", \" \").strip(),\n None)\n else:\n clean_string = file\n protocol, site, mfile, j, k, l = urlparse(clean_string)\n if protocol == \"file\":\n name = url2pathname(mfile)\n mime = get_type(name)\n if not is_valid_type(mime):\n return\n photo = MediaObject()\n self.uistate.set_busy_cursor(True)\n photo.set_checksum(create_checksum(name))\n self.uistate.set_busy_cursor(False)\n base_dir = cuni(media_path(self.dbstate.db))\n if os.path.exists(base_dir):\n name = relative_path(name, base_dir)\n photo.set_path(name)\n photo.set_mime_type(mime)\n basename = os.path.basename(name)\n (root, ext) = os.path.splitext(basename)\n photo.set_description(root)\n with DbTxn(_(\"Drag Media Object\"), self.dbstate.db) as trans:\n self.dbstate.db.add_object(photo, trans)\n widget.emit_stop_by_name('drag_data_received')", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "def handle_req( self, req ):\n start_time_handle = time.time()\n stamp = req.stamp.data\n\n cv_image = None\n for i in range(3):\n cv_image, fail = self.pop_image_by_timestamp(stamp)\n if cv_image is None and fail == 0:\n rospy.logerr(\"Unable find image swarm loop too slow!\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n else:\n if fail == 1:\n print(\"Wait 0.02 sec for image come in and re find image\")\n rospy.sleep(0.02)\n cv_image = self.pop_image_by_timestamp(stamp)\n else:\n break\n\n if cv_image is None:\n rospy.logerr(\"Unable to find such image\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n\n\n # print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\\ta=', req.a, '\\tt=', stamp )\n if len(cv_image.shape)==2:\n # print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'\n cv_image = np.expand_dims( cv_image, -1 )\n elif len( cv_image.shape )==3:\n pass\n else:\n assert False\n\n\n assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \\\n \"\\n[whole_image_descriptor_compute_server] Input shape of the image \\\n does not match with the allocated GPU memory. Expecting an input image of \\\n size %dx%dx%d, but received : %s\" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )\n\n ## Compute Descriptor\n start_time = time.time()\n i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]\n print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n\n # u = self.model.predict( i__image )\n with self.sess.as_default():\n with self.sess.graph.as_default():\n # u = self.model.predict( i__image )\n u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})\n\n print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. 
*(time.time() - start_time) ), tcol.ENDC )\n # print( '\\tinput_image.shape=', cv_image.shape, )\n # print( '\\tinput_image dtype=', cv_image.dtype )\n # print( tcol.OKBLUE, '\\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )\n # print( '\\tdesc.shape=', u.shape, )\n # print( '\\tdesc minmax=', np.min( u ), np.max( u ), )\n # print( '\\tnorm=', np.linalg.norm(u[0]) )\n # print( '\\tmodel_type=', self.model_type )\n\n\n\n ## Populate output message\n result = WholeImageDescriptorComputeTSResponse()\n # result.desc = [ cv_image.shape[0], cv_image.shape[1] ]\n result.desc = u[0,:]\n result.model_type = self.model_type\n print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n return result", "def _collectFrames(self):\n self._sources = sources = self._resolveFramePaths(self._info['sources'])\n self.logger.debug('Sources: %r', sources)\n\n frameDict = {'byFrame': {}, 'byAxes': {}, 'axesAllowed': True}\n numChecked = 0\n\n self._associatedImages = {}\n self._sourcePaths = {}\n self._channels = self._info.get('channels') or []\n\n absLargeImagePath = os.path.abspath(self._largeImagePath)\n computedWidth = computedHeight = 0\n self.tileWidth = self._info.get('tileWidth')\n self.tileHeight = self._info.get('tileHeight')\n self._nativeMagnification = {\n 'mm_x': self._info.get('scale', {}).get('mm_x') or None,\n 'mm_y': self._info.get('scale', {}).get('mm_y') or None,\n 'magnification': self._info.get('scale', {}).get('magnification') or None,\n }\n # Walk through the sources, opening at least the first two, and\n # construct a frame list. Each frame is a list of sources that affect\n # it along with the frame number from that source.\n lastSource = None\n for sourceIdx, source in enumerate(sources):\n path = source['path']\n if os.path.abspath(path) == absLargeImagePath:\n msg = 'Multi source specification is self-referential'\n raise TileSourceError(msg)\n similar = False\n if (lastSource and source['path'] == lastSource['path'] and\n source.get('params') == lastSource.get('params')):\n similar = True\n if not similar and (numChecked < 2 or not self._info.get('uniformSources')):\n # need kwargs of frame, style?\n ts = self._openSource(source)\n self.tileWidth = self.tileWidth or ts.tileWidth\n self.tileHeight = self.tileHeight or ts.tileHeight\n if not numChecked:\n tsMag = ts.getNativeMagnification()\n for key in self._nativeMagnification:\n self._nativeMagnification[key] = (\n self._nativeMagnification[key] or tsMag.get(key))\n numChecked += 1\n tsMeta = ts.getMetadata()\n if 'bands' in tsMeta:\n if not hasattr(self, '_bands'):\n self._bands = {}\n self._bands.update(tsMeta['bands'])\n lastSource = source\n bbox = self._sourceBoundingBox(source, tsMeta['sizeX'], tsMeta['sizeY'])\n computedWidth = max(computedWidth, int(math.ceil(bbox['right'])))\n computedHeight = max(computedHeight, int(math.ceil(bbox['bottom'])))\n # Record this path\n if path not in self._sourcePaths:\n self._sourcePaths[path] = {\n 'frames': set(),\n 'sourcenum': set(),\n }\n # collect associated images\n for basekey in ts.getAssociatedImagesList():\n key = basekey\n keyidx = 0\n while key in self._associatedImages:\n keyidx += 1\n key = '%s-%d' % (basekey, keyidx)\n self._associatedImages[key] = {\n 'sourcenum': sourceIdx,\n 'key': key,\n }\n source['metadata'] = tsMeta\n source['bbox'] = bbox\n self._sourcePaths[path]['sourcenum'].add(sourceIdx)\n # process metadata to determine what frames are used, 
etc.\n self._addSourceToFrames(tsMeta, source, sourceIdx, frameDict)\n # Check frameDict and create frame record\n self._frames = self._frameDictToFrames(frameDict)\n self.tileWidth = min(max(self.tileWidth, self._minTileSize), self._maxTileSize)\n self.tileHeight = min(max(self.tileHeight, self._minTileSize), self._maxTileSize)\n self.sizeX = self._info.get('width') or computedWidth\n self.sizeY = self._info.get('height') or computedHeight\n self.levels = int(max(1, math.ceil(math.log(\n max(self.sizeX / self.tileWidth, self.sizeY / self.tileHeight)) / math.log(2)) + 1))", "def _extract_features(self, times):\n times[1] = time()\n data = {n:self._extract_feature(f) for (n,f) in self.features.items()} \n times[2] = time()\n return (data, times, os.getpid())", "def add_rendered(self, image_id, data, width, height, crop):\n\t\ttry:\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.required(data, 'data')\n\t\t\tif width:\n\t\t\t\twidth = validation.cast_integer(width, 'width')\n\t\t\t\theight = validation.cast_integer(height, 'height')\n\t\t\t\tcrop = validation.cast_boolean(crop, 'crop')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\t@stack\n\t\tdef store(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpath = result[1]\n\t\t\tself.log.debug(\"writing to path: %s\" % path)\n\t\t\treturn self._write_binary(\"%s.jpg\" % path, data, True)\n\n\t\t@stack\n\t\tdef handle_nodes(result, media_id, owner_username):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\t\t\tnodes = result[1]\n\t\t\tself.log.debug(\"got nodes %s from locate_media()\" % pformat(nodes))\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\tself.log.debug(\"storing media %s on node %s\" % (media_id, n))\n\t\t\t\td2 = self._make_media_path(media_id, n, owner_username, width, height, crop)\n\t\t\t\td2.addCallback(store)\n\t\t\t\tdl.append(d2)\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: data)\n\t\t\treturn dList\n\n\t\t@stack\n\t\tdef handle_media_info(result):\n\t\t\tif result[0] != 0:\n\t\t\t\treturn result\n\n\t\t\tmedia_id = result[1]['media_id']\n\t\t\towner_username = result[1]['owner_username']\n\n\t\t\td2 = self._locate_media(media_id)\n\t\t\td2.addCallback(handle_nodes, media_id, owner_username)\n\t\t\td2.addCallback(lambda _: (0, _))\n\t\t\treturn d2\n\n\t\td = self.app.api.images.get_media_owner_id(image_id)\n\t\td.addCallback(handle_media_info)\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def fetch_data(data_annotations, key):\n def _fetch_data(data, a_key):\n # Get the path and read the waveform\n wav_file_path = data[a_key]['wav_path']\n out_x, out_fs = io.AudioIO.wavRead(wav_file_path, mono=True)\n # Generate time-domain labels\n pointers_in = data[a_key]['start_time']\n pointers_out = data[a_key]['stop_time']\n if not len(pointers_in) == len(pointers_out):\n raise AttributeError(\"Unequal number of pointers. 
Problems may occur...\")\n out_y = np.zeros(out_x.shape)\n for p_indx in range(len(pointers_in)):\n c_pin = int(np.floor(pointers_in[p_indx] * out_fs))\n c_pout = int(np.floor(pointers_out[p_indx] * out_fs))\n out_y[c_pin:c_pout] = 1.\n\n return out_x, out_y, out_fs\n\n if type(key) == list:\n print('Number of key entries: ' + str(len(key)))\n print('Fetching: ' + key[0])\n x, y, fs = _fetch_data(data_annotations, key[0])\n for key_item in key[1:]:\n print('Fetching: ' + key_item)\n x_b, y_b, _ = _fetch_data(data_annotations, key_item)\n x = np.hstack((x, x_b))\n y = np.hstack((y, y_b))\n else:\n x, y, fs = _fetch_data(data_annotations, key)\n\n return x, y, fs", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()", "def image(self, state):\n valid_time = _to_datetime(state.valid_time)\n\n # 15 minute/1 hour slice of data?\n window = dt.timedelta(minutes=60) # 1 hour window\n paths = self.locator.find_period(valid_time, window)\n frame = self.loader.load(paths)\n frame = self.select_date(frame, valid_time, window)\n\n # Filter intra-cloud/cloud-ground rows\n if \"intra-cloud\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"IC\"]\n elif \"cloud-ground\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"CG\"]\n\n # EarthNetworks validity box (not needed if tiling algorithm)\n longitude_range = (26, 40)\n latitude_range = (-12, 4)\n x_range, y_range = geo.web_mercator(longitude_range, latitude_range)\n\n x, y = geo.web_mercator(frame[\"longitude\"], frame[\"latitude\"])\n frame[\"x\"] = x\n frame[\"y\"] = y\n pixels = 256\n canvas = datashader.Canvas(\n plot_width=pixels,\n plot_height=pixels,\n x_range=x_range,\n y_range=y_range,\n )\n\n if \"density\" in state.variable.lower():\n # N flashes per pixel\n agg = canvas.points(frame, \"x\", \"y\", datashader.count())\n else:\n frame[\"since_flash\"] = self.since_flash(frame[\"date\"], valid_time)\n agg = canvas.points(frame, \"x\", \"y\", datashader.max(\"since_flash\"))\n\n # Note: DataArray objects are not JSON serializable, .values is the\n # same data cast as a numpy array\n x = agg.x.values.min()\n y = agg.y.values.min()\n dw = agg.x.values.max() - x\n dh = agg.y.values.max() - y\n image = np.ma.masked_array(\n agg.values.astype(np.float), mask=np.isnan(agg.values)\n )\n if \"density\" in state.variable.lower():\n image[image == 0] = np.ma.masked # Remove pixels with no data\n\n # Update color_mapper\n color_mapper = self.color_mappers[\"image\"]\n if \"density\" in state.variable.lower():\n color_mapper.palette = bokeh.palettes.all_palettes[\"Spectral\"][8]\n color_mapper.low = 0\n color_mapper.high = agg.values.max()\n else:\n color_mapper.palette = 
bokeh.palettes.all_palettes[\"RdGy\"][8]\n color_mapper.low = 0\n color_mapper.high = 60 * 60 # 1 hour\n\n # Update tooltips\n for hover_tool in self.hover_tools[\"image\"]:\n hover_tool.tooltips = self.tooltips(state.variable)\n hover_tool.formatters = self.formatters(state.variable)\n\n if \"density\" in state.variable.lower():\n units = \"events\"\n else:\n units = \"seconds\"\n\n data = {\n \"x\": [x],\n \"y\": [y],\n \"dw\": [dw],\n \"dh\": [dh],\n \"image\": [image],\n }\n meta_data = {\n \"variable\": [state.variable],\n \"date\": [valid_time],\n \"units\": [units],\n \"window\": [window.total_seconds()],\n }\n data.update(meta_data)\n self.sources[\"image\"].data = data", "def getimage(self):", "def __init__(self, data):\n\t\tself.protocol_version, self.le_state, self.playback_state, \\\n\t\t self.source, self.le_flags, self.playback_flags, \\\n\t\t self.source_flags, self.fullness, self.point_rate, \\\n\t\t self.point_count = \\\n\t\t\tstruct.unpack(\"<BBBBHHHHII\", data)", "def read(self):\n\n # ret, image = self.video.read()\n (self.grabbed, self.frame) = self.cap.read()\n image = self.frame\n\n if image is not None:\n \"\"\"Update FPS, and incode received frame. \"\"\"\n self.fps.update()\n # TODO: add self.fps.fps() to image, if flagged raised.\n\n # We are using Motion JPEG, but OpenCV defaults to cap raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n\n # display a piece of text to the frame (so we can benchmark\n # fairly against the fast method)\n self.fps.stop()\n cv2.putText(image, \"FPS (simple): {:.2f}\".format(self.fps.fps()), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n self.frame = image.copy()\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n self.logger.debug(\"in 'get_frame', video.read not success\")", "def get_image(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT data FROM image WHERE id = '{self.image_id}'\")\n image = cursor.fetchone()\n cursor.close()\n return b64encode(image['data']).decode('utf-8')", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def get_info(self):\n\n return (self.source,\n self.rate,\n self.numChannels,\n self.totalSamples,\n self.duration,\n self.dataType)", "def get_data_from_name(image_name):\n nome = image_name.split(\".\")[0]\n nome_recebido = list(nome)\n ano = ''.join(nome_recebido[:4])\n mes = ''.join(nome_recebido[4:6])\n dia = ''.join(nome_recebido[6:8])\n hora = ''.join(nome_recebido[8:10])\n minuto = ''.join(nome_recebido[10:12])\n segundo = ''.join(nome_recebido[12:14])\n codigo = ''.join(nome_recebido[14:24])\n certeza = ''.join(nome_recebido[24:27])\n placa = ''.join(nome_recebido[27:34])\n posicao = ''.join(nome_recebido[34])\n classificao = ''.join(nome_recebido[35:37])\n velocidade = ''.join(nome_recebido[37:40])\n comprimento = ''.join(nome_recebido[40:43])\n sequencial = ''.join(nome_recebido[43:])\n\n return [ano, mes, dia, hora, minuto, segundo, codigo, certeza, placa, posicao, classificao, velocidade, comprimento,\n sequencial]", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = 
h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def step(inputs, state, outputs):\n if not inputs['time']['ena']:\n return\n\n (is_ok, outputs['img']['buff']) = state['cap'].read() # OpenCV |- BGR\n outputs['img']['ena'] = is_ok\n outputs['img']['ts'] = inputs['time']['ts']\n\n print('CAP: ' + str(is_ok))", "def udp_frame(self, img, client):\n compress_img = cv2.imencode('.jpg', img)[1]\n dat = compress_img.tostring()\n size = len(dat)\n count = math.ceil(size / self.MAX_IMAGE_DGRAM)\n array_pos_start = 0\n while count:\n array_pos_end = min(size, array_pos_start + self.MAX_IMAGE_DGRAM)\n self.send_sock.sendto(struct.pack(\"B\", count) +\n dat[array_pos_start:array_pos_end],\n client\n )\n array_pos_start = array_pos_end\n count -= 1", "def udp_frame(self, img):\r\n if img is not None:\r\n compress_img = cv2.imencode('.jpg', img)[1]\r\n dat = compress_img.tobytes()\r\n size = len(dat)\r\n count = math.ceil(size / self.MAX_IMAGE_DGRAM)\r\n array_pos_start = 0\r\n while count:\r\n array_pos_end = min(size, array_pos_start + self.MAX_IMAGE_DGRAM)\r\n self.s.sendto(struct.pack(\"B\", count) +\r\n dat[array_pos_start:array_pos_end],\r\n (self.addr, self.port)\r\n )\r\n array_pos_start = array_pos_end\r\n count -= 1", "def _get_data(self):\n raise NotImplementedError()", "def _parseData(self, payload):\n out=[]\n bytesParsed = 0\n while bytesParsed < len(payload):\n\n #check for the extended Code Level, code and length\n #count the number of EXCODE_BYTE\n #extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )\n #bytesParsed += extendedCodeLevel\n\n #identify the length of the expected bytes in the payload\n code = payload[bytesParsed]\n bytesParsed +=1\n if code > 0x7F:\n # multi-byte code, length > 1\n length = payload[bytesParsed]\n bytesParsed +=1\n else:\n length = 1\n\n if code == SENSOR_STATUS:\n # value of 0==no contact, 200==contact\n #print \"leadoff: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )\n bytesParsed +=1\n\n elif code == HEART_RATE:\n #print \"HR: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == CONFIG_BYTE:\n #print \"config: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == RAW_ECG:\n # raw value is between -32768 and 32767, in twos compliment form\n # if the raw value is higher than 32768, it should be rolled around to allow for negative values\n raw = payload[bytesParsed]*256 + payload[bytesParsed]\n if raw >= 32768: \n raw = raw - 65536\n #print \"ecg: %i\" % ecg\n\n # create the timestamp on each ECG sample, starting from the first\n if self.starttime is None:\n self.starttime = time.time()\n self.curtime = self.starttime\n else:\n self.curtime = self.curtime + 1./self.Fs\n\n out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )\n bytesParsed += length\n\n elif code == DEBUG_1:\n #print \"debug1: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )\n bytesParsed += length\n\n elif code == DEBUG_2:\n #print \"debug2: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )\n bytesParsed += length\n\n else:\n print \"unknown code: %i\" % code\n\n return out", "def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None", "def 
save_mem_load(self):\n if len(self.get_data_shape())==4 and self._img:\n data = np.zeros(self.get_data_shape())\n self._data = np.rot90(data)\n self._loaded_time_list = [0]\n self._data[..., 0] = np.rot90(self._img.dataobj[..., 0])\n else:\n self._loaded_time_list = [0]\n data = self._img.get_data(caching='unchanged')\n self._data = np.rot90(data)", "def add_modified(self, image_id, media_type, data):\n\t\ttry:\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tmedia_type = validation.cast_integer(media_type, 'media_type')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tif isinstance(data, xmlrpclib.Binary):\n\t\t\tdata = data.data # looks strange, but that's how xmlrpc works :)\n\n\t\t@stack\n\t\tdef store(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpath = result[1]\n\t\t\tself.log.debug(\"writing to path: %s\" % path)\n\t\t\treturn self._write_binary(\"%s.jpg\" % path, data, True)\n\n\t\t@stack\n\t\tdef handle_nodes(result, media_id, owner_username):\n\t\t\t\"\"\"\n\t\t\tI don't know what the hell this does. looks like nothing.\n\n\t\t\t@return: Unknown\n\t\t\t@rtype: Unknown\n\n\t\t\tThe above comment was added by Clint.\n\t\t\tI left it here to illustrate something:\n\n\t\t\t\t\tClint's full of shit.\n\n\t\t\tV\n\t\t\t\"\"\"\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tnodes = result[1]\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\td2 = self._make_media_path(media_id, n, owner_username)\n\t\t\t\td2.addCallback(store)\n\t\t\t\td2.addCallback(lambda _: self.clear_renders(media_id, owner_username, n))\n\t\t\t\tdl.append(d2)\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList\n\n\t\t@stack\n\t\tdef handle_media_info(result):\n\t\t\tif result[0] != 0:\n\t\t\t\treturn result\n\n\t\t\tmedia_id = result[1]['media_id']\n\t\t\towner_username = result[1]['owner_username']\n\t\t\td3 = self._locate_media(media_id)\n\t\t\td3.addCallback(handle_nodes, media_id, owner_username)\n\t\t\td3.addCallback(lambda _: (0, _))\n\t\t\treturn d3\n\n\t\td = self.app.api.images.get_media_owner_id(image_id)\n\t\td.addCallback(handle_media_info)\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def _retrieveCachedData(self):", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def extract_data(filename,norm_shift=False,norm_scale=True,tag=1):\n print('Extracting',filename)\n data = extractdb_images(filename,tag)\n\n if norm_shift:\n data = data-(PIXEL_DEPTH/2.0)\n if norm_scale:\n data = data/PIXEL_DEPTH\n\n num = data.shape[0]\n data = np.reshape(data,[num,-1])\n # print(data.shape) #(2304,4096) #(576,4096)\n\n return data", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def extract_metadata(self, msg, payload, text, part):\n\n if part.get_content_maintype() == \"image\":\n\n name = part.get_param(\"name\")\n subtype = part.get_content_subtype()\n\n 
self._add_name(msg, name)\n self._update_counts(msg, subtype, by=1)\n self._save_stats(msg, part.get_payload(decode=True), subtype)", "def store_img_infos(self, msg):\n # msg is technically a ConsumerRecord that is a collections.namedtuple, see:\n # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py#L30\n strk = str(msg['sha1'])\n self.dict_sha1_infos[strk] = dict()\n for key in msg:\n # dumps json of 'img_info'\n # We actually need that only for DIG...\n if key == \"img_info\":\n self.dict_sha1_infos[strk][key] = json.dumps(msg[key])\n else:\n # discard 'img_buffer' (if it exists?...), and 'sha1'\n # if k != \"img_buffer\" and k != \"sha1\":\n # self.dict_sha1_infos[strk][k] = msg[k]\n # discard 'sha1'\n if key != \"sha1\":\n self.dict_sha1_infos[strk][key] = msg[key]", "def loadData(catalog):\n delta_time = -1.0\n delta_memory = -1.0\n\n tracemalloc.start()\n start_time = getTime()\n start_memory = getMemory()\n\n loadVideos(catalog)\n loadVideosCategory(catalog)\n\n stop_memory = getMemory()\n stop_time = getTime()\n tracemalloc.stop()\n\n delta_time = stop_time - start_time\n delta_memory = deltaMemory(start_memory, stop_memory)\n\n return delta_time, delta_memory", "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def fetch(self,image_type):\n if image_type == self.IMAGE:\n return self.image\n elif image_type == self.FRAME:\n return self.frame\n elif image_type ==self.DIFFERENCE:\n return self.diff_frame\n elif image_type == self.ABS_DIFFERENCE:\n return self.abs_diff_frame\n else:\n print('Error defining frame to be fetched!!!')", "def image_test_case(img, expected_results, info_string):\n global passed_count, failed_count\n\n path = TEST_IMGS + img\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nTesting image handling of {}\".format(path))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n with open(path, 'rb') as f:\n img_bytes = f.read()\n\n sock.send(START)\n sock.send(GPS)\n sock.send(b'51.5138')\n sock.send(LONG)\n sock.send(b'-0.09847899999999754')\n sock.send(SOF)\n sock.send(img_bytes)\n sock.send(END_MESSAGE)\n\n response_1 = sock.recv(4)\n response_2 = sock.recv(4)\n responses = [response_1, response_2]\n\n for expected in expected_results:\n if expected not in responses:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}. 
Received {}.\".format(\n expected_results, responses))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def getImageInfo(self, path, timestamp=None):\n\n key = self.generateCacheKey(path, timestamp)\n if not key in self.cache:\n info = self.fetchInfo(path)\n self.cache[key] = info\n\n return self.cache[key]", "def mean_time(self) -> 'ImageCollection':\n\n process_id = 'mean_time'\n\n args = {\n 'imagery': self.graph\n }\n\n return self.graph_add_process(process_id, args)", "def mean_time(self) -> 'ImageCollection':\n\n process_id = 'mean_time'\n\n args = {\n 'imagery': self.graph\n }\n\n return self.graph_add_process(process_id, args)", "def camera_image_callback(self, ros_data):\n self.last_call_back_time = rospy.get_time()\n\n # self.logger.info(\"Got image\")\n if self.lastCameraInfo is not None:\n # Collect latest ros_data\n self.image_queue.put((ros_data, self.lastCameraInfo, self.seq_stamper), block=True)\n self.seq_stamper += 1\n\n # self.logger.warning(str(len(multiprocessing.active_children())))\n else:\n self.logger.warning(\"No camera info\")", "def get_message(message_type, source, data=None):\n if message_type == 'image':\n header = (MESSAGE_HEADER % source)\n body = (MESSAGE_BODY % (data['location'], data['width'], data['height']))\n urllib.request.urlretrieve(data['last_media'], 'tmp.jpg')\n return header + body\n else:\n header = (MESSAGE_HEADER % source)\n return header + MESSAGE_FOOTER", "def get_observation(self, sensor_data):\n image = post_process_image(sensor_data['birdview'][1], normalized = False, grayscale = False)\n\n if self.prev_image_0 is None:\n self.prev_image_0 = image\n self.prev_image_1 = self.prev_image_0\n self.prev_image_2 = self.prev_image_1\n\n images = image\n\n if self.frame_stack >= 2:\n images = np.concatenate([self.prev_image_0, images], axis=2)\n if self.frame_stack >= 3 and images is not None:\n images = np.concatenate([self.prev_image_1, images], axis=2)\n if self.frame_stack >= 4 and images is not None:\n images = np.concatenate([self.prev_image_2, images], axis=2)\n\n self.prev_image_2 = self.prev_image_1\n self.prev_image_1 = self.prev_image_0\n self.prev_image_0 = image\n\n return images, {}", "def send_image_frame_REP_watcher(self, text, image):\n\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_image(text, image)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_image in REP_watcher function.\")\n self. 
fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"", "def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)", "def readSrc_byTime(self):\n for msg in self.srcFile:\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n self.srcHeader.append(msg)\n else:\n msg = msg.split()\n time = float(msg[0])\n meas = msg[1]\n sens = msg[2]\n valu = msg[3]\n if time not in self.srcData: # none from this time yet\n self.srcData[time] = {}\n if 
sens not in self.srcData[time]: # none at this time from this gSensor\n self.srcData[time][sens] = {}\n self.srcData[time][sens][meas] = valu # assume only one message per meas from sens at a time", "def __next__(self):\n while True:\n self.stream_bytes += self.stream_conn.read(1024)\n first = bytearray(self.stream_bytes).find(b'\\xff\\xd8')\n last = bytearray(self.stream_bytes).find(b'\\xff\\xd9')\n if first != -1 and last != -1:\n jpg = self.stream_bytes[first:last + 2]\n self.stream_bytes = self.stream_bytes[last + 2:]\n image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), 0)\n self.total_frame += 1\n return image" ]
[ "0.58509904", "0.5848849", "0.5797124", "0.5720454", "0.5719993", "0.5676541", "0.5621858", "0.5572144", "0.55561996", "0.55333614", "0.5499547", "0.54932874", "0.5475753", "0.54749036", "0.5426874", "0.5406819", "0.54067355", "0.53759944", "0.5365759", "0.53495574", "0.5326141", "0.5318431", "0.529023", "0.5287006", "0.52861005", "0.52511775", "0.5248045", "0.5238396", "0.5226677", "0.5206917", "0.52008253", "0.5184441", "0.51415765", "0.5137786", "0.5115012", "0.51147366", "0.51146185", "0.50856805", "0.5072967", "0.5062784", "0.50612473", "0.50595176", "0.5051646", "0.50460374", "0.50460374", "0.50443083", "0.50428545", "0.50399214", "0.503645", "0.5036421", "0.5032316", "0.503143", "0.5029291", "0.50269103", "0.50253844", "0.5017713", "0.5015826", "0.5013432", "0.50102454", "0.5004568", "0.5001388", "0.49697492", "0.49678424", "0.4952955", "0.49522048", "0.49515608", "0.49483564", "0.4946394", "0.49422252", "0.49289578", "0.49226597", "0.49151328", "0.49130204", "0.49115595", "0.49097532", "0.49065912", "0.48946238", "0.48895162", "0.4888275", "0.4883383", "0.4880773", "0.4878288", "0.4876914", "0.487447", "0.48725834", "0.48713714", "0.4864696", "0.48623505", "0.4861418", "0.48590323", "0.48586938", "0.48582557", "0.48582557", "0.4857388", "0.48572886", "0.48570216", "0.4854874", "0.48541164", "0.4852796", "0.4850466", "0.48486894" ]
0.0
-1
Collects image data via appropriate protocol and returns time and data.
def poll(self) -> Tuple[np.ndarray]: try: v = self.controller.get(self.pvname) except TimeoutError: print(f"No process variable found for {self.pvname}") v = DEFAULT_SCALAR_VALUE return v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def image_fetcher(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def get_image_data(imagedir, model_kwds=dict(layer='fc2'),\n img_kwds=dict(size=(224,224)), timestamps_kwds=dict(source='auto'),\n pca_kwds=None):\n fingerprints_fn = pj(imagedir, ic_base_dir, 'fingerprints.pk')\n images_fn = pj(imagedir, ic_base_dir, 'images.pk')\n if os.path.exists(images_fn):\n print(f\"reading image arrays {images_fn} ...\")\n images = read_pk(images_fn)\n else:\n print(f\"create image arrays {images_fn}\")\n images = read_images(imagedir, **img_kwds)\n write_pk(images, images_fn)\n if os.path.exists(fingerprints_fn):\n print(f\"reading fingerprints {fingerprints_fn} ...\")\n fingerprints = read_pk(fingerprints_fn)\n else:\n print(f\"create fingerprints {fingerprints_fn}\")\n fingerprints = ic.fingerprints(images, ic.get_model(**model_kwds))\n if pca_kwds is not None:\n fingerprints = ic.pca(fingerprints, **pca_kwds)\n write_pk(fingerprints, fingerprints_fn)\n print(f\"reading timestamps ...\")\n if timestamps_kwds is not None:\n timestamps = read_timestamps(imagedir, **timestamps_kwds)\n return images, fingerprints, timestamps", "def get_image_data(self):\n raise NotImplementedError(str(type(self)) + 'does not'\n 'implement get_image.')", "def get_img_data(data_type, file_info, img_info, **kwargs):\n if file_info['ext']=='fits':\n hdulist = get_file(file_info)\n data = hdulist[int(img_info['frame'])].data\n else:\n try:\n from PIL import Image\n except ImportError:\n raise ToyzJobError(\n \"You must have PIL (Python Imaging Library) installed to \"\n \"open files of this type\"\n )\n img = get_file(file_info)\n data = np.array(img)\n \n if data_type == 'data':\n if 'scale' in kwargs:\n width = int(kwargs['width']/2/img_info['viewer']['scale'])\n height = int(kwargs['height']/2/img_info['viewer']['scale'])\n else:\n width = int(kwargs['width']/2)\n height = int(kwargs['height']/2)\n x0 = max(0, kwargs['x']-width)\n y0 = max(0, kwargs['y']-height)\n xf = min(data.shape[1], kwargs['x']+width)\n yf = min(data.shape[0], kwargs['y']+height)\n if 'scale' in kwargs:\n tile_data = {\n 'x0_idx': x0,\n 'y0_idx': y0,\n 'xf_idx': xf,\n 'yf_idx': yf\n }\n data = scale_data(file_info, img_info, tile_data, data)\n else:\n data = data[y0:yf, x0:xf]\n response = {\n 'id': 'data',\n 'min': float(data.min()),\n 'max': float(data.max()),\n 'mean': float(data.mean()),\n 'median': float(np.median(data)),\n 'std_dev': float(np.std(data)),\n 'data': data.tolist()\n }\n elif data_type == 'datapoint':\n if (kwargs['x']<data.shape[1] and kwargs['y']<data.shape[0] and\n kwargs['x']>=0 and kwargs['y']>=0):\n response = {\n 'id': 'datapoint',\n 'px_value': float(data[kwargs['y'],kwargs['x']])\n }\n else:\n response = {\n 'id': 'datapoint',\n 'px_value': 0\n }\n else:\n raise ToyzJobError(\"Loading that data type has not been implemented yet\")\n return response", "def populate_image_stats(self, image):\n ti = image\n image_data = ti.data\n if not ti.data:\n return ti\n ti.size = len(image_data)\n try:\n with connect(Blobby) as c:\n ti.shahash = c.get_data_bhash(image_data)\n except 
o.Exception, ex:\n raise o.Exception('oException getting shahash: %s' % ex.msg)\n except Exception, ex:\n raise o.Exception('Exception getting shahash: %s' % ex)\n\n try:\n b = StringIO(image_data)\n img = Image.open(b)\n except Exception, ex:\n raise o.Exception('Exception getting PIL img: %s' % ex)\n try:\n ti.xdim, ti.ydim = img.size\n except Exception, ex:\n raise o.Exception('Exception getting dimensions: %s' % ex)\n try:\n ti.vhash = str(average_hash(img))\n except Exception, ex:\n raise o.Exception('Exception getting vhash: %s' % ex)\n\n return ti", "def data(self):\n return self.image", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)", "def image_fetcher_depricated(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/image/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def __get_image(self, source):\n if not source in self.__video_modules:\n return (None, None)\n with self.__video_locks[source]:\n last_time, last_img = self.__last_images[source]\n age = time.time() - last_time\n if age > 0.05:\n new_image = self.__video_modules[source].get_image()\n try:\n new_time = self.__video_modules[source].get_time()\n print \"Got time from ros: %f\" % new_time\n except:\n new_time = time.time()\n\n if new_image:\n last_time = new_time \n last_img = new_image\n self.__last_images[source] = (new_time, new_image)\n return (last_time, last_img)", "def get(self):\n\t\tif not self.threaded:\n\t\t\tself.record()\n\t\timg = self.Video[-1]\n\t\ttime = self.timestamps[-1]\n\t\tif self.newAvailable:\n\t\t\tnew = True\n\t\t\tself.newAvailable = False\n\t\t\treturn new, img, time\n\t\telse:\n\t\t\tnew = False\n\t\t\treturn new, img, time", "def _image_data(buff):\n code = buff.getvalue()\n m = _size(code)\n if m:\n size = int(m.group(1))\n else:\n raise Exception('Internal error: PPM header not found')\n return code[m.end():], size", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n 
self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def data(self):\n return self._img", "def extract_data(filename, num_images, starting_id, context_factor):\n imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n\n\n imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(imgs)\n IMG_WIDTH = int(imgs[0].shape[0]/DOWNSCALE)\n IMG_HEIGHT = int(imgs[0].shape[1]/DOWNSCALE)\n N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)\n\n\n img_patches = [img_crop_context(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor, sub_mean=True) for i in range(num_images)]\n data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]\n data = np.asarray(data)\n return data", "def receive_message(self, msg):\n # TODO(eric.cousineau): Consider moving decode logic.\n with self.lock:\n self.utime = msg.header.utime\n self._image = decode_lcmt_image(msg, self._image)\n self._is_depth_image = (msg.pixel_format\n == lcmt_image.PIXEL_FORMAT_DEPTH)", "def read(self):\n self._sync()\n d = {tag: struct.unpack('<f', self.pic.read(4))[0] for tag in tags}\n d['ts_pic'] = struct.unpack('<i', self.pic.read(4))[0]\n return d", "def start(self) -> None:\n data = 
b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def process(self, image):", "def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()", "def get_data(self, t: int, **kwargs) -> Image:\n return self._get_single_frame(int(self._resolve_index(t)), **kwargs)", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points 
in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def __init__(self, data):\n # check if dataset contains time information\n # (fetched from bootloader storage)\n if len(data) == 61:\n (_, seconds, minutes, hours, days, months, years) = struct.unpack(\n '<55sBBBBBB', data)\n self.date = datetime(2000 + years, months, days, hours, minutes,\n seconds)\n\n # Only parse preceding data\n data = data[:55]\n power = [0, 0]\n kWh = [0, 0]\n MWh = [0, 0]\n (_, digital, speed, active, power[0], kWh[0], MWh[0], power[1], kWh[1],\n MWh[1]) = struct.unpack('<32sH4sBLHHLHH', data)\n\n analog = struct.unpack(\n '<{}{}'.format('H' * 16, 'x' * (len(data) - 32)), data)\n\n self.analog = {}\n for channel in range(0, 16):\n self.analog[channel + 1] = round(\n self._convert_analog(analog[channel]), 3)\n\n self.digital = {}\n for channel in range(0, 16):\n self.digital[channel + 1] = self._convert_digital(digital, channel)\n\n '''\n self.speed = {}\n for channel in range(0, 4):\n self.speed[channel + 1] = round(\n self._convert_speed(speed[channel]), 3)\n \n\n self.energy = {}\n for channel in range(0, 2):\n self.energy[channel + 1] = round(\n self._convert_energy(MWh[channel], kWh[channel], active,\n channel), 3)\n \n\n self.power = {}\n for channel in range(0, 2):\n self.power[channel + 1] = round(\n self._convert_power(power[channel], active, channel), 3)\n '''", "def retrieveImageInfo(self, filename):\t\t \n\t\tassert filename, \"Filename must be defined\"\n\t\tassert os.path.exists(filename), \"File that we're retrieving information \\\n\t\t\t\t\t\t\t\t\t\tfrom (%s) needs to exist, but doesn't.\" % filename\n\t\tself.ext = filename.split(\".\")[-1].lower()\n\t\trdr = self.getReaderByExtension(self.ext)\n\t\t\n\t\tif self.ext == \"bmp\":\n\t\t\trdr.Allow8BitBMPOn()\n\t\trdr.SetFileName(filename)\n\t\tif rdr.IsA(\"vtkExtTIFFReader\"):\n\t\t\trdr.UpdateInformation()\n\t\t\tif rdr.GetNumberOfScalarComponents() == 1:\n\t\t\t\trdr.RawModeOn()\n\n\t\tdata = rdr.GetOutput()\n\t\tdata.Update()\n\t\tself.numberOfComponents = data.GetNumberOfScalarComponents()\n\n\t\tif not self.ctf:\n\t\t\tbd = self.getDataBitDepth(data)\n\t\t\tself.ctf = vtk.vtkColorTransferFunction()\n\t\t\tif bd == 8 or bd == 12:\n\t\t\t\tself.ctf.AddRGBPoint(0, 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint((2 ** bd) - 1, 0, 1, 0)\n\t\t\telse:\n\t\t\t\trange = data.GetScalarRange()\n\t\t\t\tself.ctf.AddRGBPoint(range[0], 0, 0, 0)\n\t\t\t\tself.ctf.AddRGBPoint(range[1], 0, 1, 0)\n\t\t\t\n\t\tself.x, self.y, z = data.GetDimensions()\n\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\tif z > 1:\n\t\t\tself.slicesPerTimepoint = z\n\t\t\tself.z = z\n\t\t\tself.dimensions = (self.x, self.y, self.slicesPerTimepoint)\n\t\t\tlib.messenger.send(self, \"update_dimensions\")\n\t\tself.originalDimensions = self.dimensions", "def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass", "def parse_image(self, image):\n # parse the image data into a pygame surface for display or screenshot\n # raw image is BGRA\n # if image_type is segmentation, here will convert to the pre-defined color\n image.convert(self.image_type)\n\n array = np.frombuffer(image.raw_data, 
dtype=np.dtype(\"uint8\"))\n array = np.reshape(array, (image.height, image.width, 4))\n array = array[:, :, :3]\n array = array[:, :, ::-1] # BGR -> RGB\n self.rgb_image = array\n self.pygame_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))\n\n self.last_image_seconds = image.timestamp\n self.last_image_frame_num = image.frame", "def image_data(verbose=False):\n # This is a principled use of the `global` statement; don't lint me.\n global _IMAGE_DATA # pylint: disable=global-statement\n if _IMAGE_DATA is None:\n if verbose:\n logger.info(\"--- Downloading image.\")\n with contextlib.closing(urllib.request.urlopen(IMAGE_URL)) as infile:\n _IMAGE_DATA = infile.read()\n return _IMAGE_DATA", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def time_info(input_file):\n original_path = os.getcwd() #set original directory\n save_path = input_file['save_path']\n planet = input_file['exoplanet'] #set exoplanet name\n print '\\nObtain the images .... \\n'\n print 'Change to ', save_path\n os.chdir(save_path) #change to save directory where is our scvience images\n images = sorted(glob.glob('AB'+input_file['exoplanet']+'*.fits'))\n print '\\nImages = \\n',images\n tempo_loc = [] #time object\n SUN = [] #Sun coordinate object\n ra_sun, dec_sun, dsun = np.zeros(len(images)),np.zeros(len(images)),np.zeros(len(images)) #sun coordinates\n JD = np.zeros(len(images)) #julian date from time object\n ST = np.zeros(len(images))\n HJD = np.zeros(len(images))\n #create the exoplanet object coordianate\n exoplanet = SkyCoord(dec=input_file['DEC'],ra=input_file['RA'],unit=('deg','deg'),frame=input_file['frame'])\n print '\\nObtain data info from header ....\\n'\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n UTC = hdr['date-obs']+'T'+hdr['UT'] #string that contain the time in UTC in isot format\n tempo_loc.append(Time(UTC,scale=input_file['scale-time'],format='isot',location=(input_file['lon-obs'],input_file['lat-obs'])))#,input_data['altitude'])))\n JD[i] = tempo_loc[i].jd\n ST[i] = tempo_loc[i].sidereal_time('apparent').hour\n SUN.append(get_sun(tempo_loc[i]))\n ra_sun[i],dec_sun[i] = SUN[i].ra.deg, SUN[i].dec.deg\n dsun[i] = SUN[i].distance.value\n HJD[i] = use.hjd_date(JD[i],dsun[i],dec_sun[i],ra_sun[i],exoplanet.dec.deg,exoplanet.ra.deg,circular_orbit=input_file['circular_orbit'])\n use.update_progress((i+1.)/len(images))\n print '\\n.... 
done.\\n'\n print '\\n Time from header = \\n'\n #print '\\nImages ** UTC (YYYY-MM-DDTHH:MM:SS) ** JD (7d.5d) ** ST (hours) ** ST (HH:MM:SS) ** Sun Coordinate (epoch,RA,DEC,Distance) (deg,deg,AU) \\n'\n ST_string = []\n for i in range(len(images)):\n ST1 = int(ST[i])\n ST2 = int((ST[i]-ST1)*60.)\n ST3 = (((ST[i]-ST1)*60.)-ST2)*60\n ST_string.append(str(ST1)+':'+str(ST2)+':'+str(ST3))\n tempo_loc[i] = tempo_loc[i].value\n use.update_progress((i+1.)/len(images))\n #print images[i], ' ** ',tempo_loc[i], ' ** ', JD[i], ' ** ', ST[i],' ** ',ST_string[i],' ** ',sun_loc[i],' ** ',HJD[i]\n print '\\nSave data file ... \\n'\n data = DataFrame([images,tempo_loc,list(JD),list(ST),list(ST_string),list(ra_sun),list(dec_sun),list(dsun),list(HJD)]).T\n data.columns=['images','UTC','JD','ST','ST_isot','RA_SUN','DEC_SUN','D_SUN','HJD']\n print data\n data.to_csv('results.csv')\n os.chdir(original_path)\n return", "def get_data(self):\n return {\"imgID\": self.image_id}", "def getImageStats(self, open_url):\n ret_image_info = None\n if \"image\" in open_url.headers.get(\"content-type\"):\n ret_image_info = self.ImageInfo()\n\n ret_image_info.size = open_url.headers.get(\"content-length\") or None\n if ret_image_info.size:\n ret_image_info.size = int(ret_image_info.size)\n self.getDataFromImage(open_url, ret_image_info)\n\n return ret_image_info", "def retrieve_data(vx_handle, i_bytes_captured, i_wait_count, s_channels):\n i_bytes_remaining = min(i_bytes_captured, i_wait_count * 4 * len(s_channels))\n i_block_offset = 0\n f_data = []\n i_retries = 0\n while i_bytes_remaining > 0:\n i_block_cnt = min(64, int(math.ceil(i_bytes_remaining / 1024.0)))\n vx_handle.write('CAPTUREGET? %d, %d'%(i_block_offset, i_block_cnt))\n buf = vx_handle.read_raw() # read whatever dut sends\n if not buf:\n print('empty response from dut for block %d'%i_block_offset)\n i_retries += 1\n if i_retries > 5:\n print('\\n\\n**** TOO MANY RETRIES ATTEMPTING TO GET DATA! 
****')\n if not i_block_offset:\n print('**** NO DATA RETUNED ****\\n')\n sys.exit(-1)\n\n # binary block CAPTUREGET returns #nccccxxxxxxx...\n # with little-endian float x bytes see manual page 139\n # if b_show_debug:\n # print(' '.join(['%02X'%ord(x) for x in buf[:6]]))\n # print(str_blocks_hex(buf[6:262]))\n\n raw_data = buf[2 + int(buf[1]):]\n i_bytes_to_convert = min(i_bytes_remaining, len(raw_data))\n # convert to floats\n f_block_data = list(unpack_from('<%df'%(i_bytes_to_convert/4), raw_data))\n # if b_show_debug:\n # print(len(f_block_data), 'floats received')\n # print(str_blocks_float(f_block_data))\n f_data += f_block_data\n i_block_offset += i_block_cnt\n i_bytes_remaining -= i_block_cnt * 1024\n return f_data", "async def async_fetch_image_data(self, image_name, username, password):\n params = {}\n cookies = self.get_session_cookie()\n if username is not None and password is not None:\n params['user'] = self.encode_user(username, password)\n else:\n params['user'] = ''\n async with aiohttp.ClientSession(cookies=cookies) as session:\n resp = await session.get(\n '{}/{}.jpg'.format(self._base_url, image_name),\n params=params\n )\n if resp.headers['Content-Type'] == 'image/jpeg':\n data = await resp.read()\n else:\n data = None\n return data", "def process_images(img_xy, img_z):\n logging.info(\"paired {} and {}\".format(img_xy.ts, img_z.ts))\n for item in xy_imgs:\n assert(item.ts >= img_xy.ts)\n for item in z_imgs:\n assert(item.ts >= img_z.ts)\n\n xy_data = np.asarray(img_xy.data, dtype='uint8')\n z_data = np.asarray(img_z.data, dtype='uint8')\n\n xy_tracker.run_tracking(xy_data)\n z_tracker.run_tracking(z_data)\n\n try:\n x, y1 = xy_tracker.get_avg().astype(float)\n z, y2 = z_tracker.get_avg().astype(float)\n msg = dict(x=x, y=y1, z=z)\n msg = json.dumps(msg)\n send_socket_msg(msg)\n except Exception:\n pass", "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def process_download_other(self, data, meta_file_name, connection_time):\n block_size = 1024\n # content-length in bytes\n self.data_len = float(data.info().get('Content-length', None))\n config_pytomo.LOG.debug('Content-length: %s' % self.data_len)\n meta_file = open(meta_file_name, 'ab+')\n tries = 0\n self._total_bytes = 0\n self.state = INITIAL_BUFFERING_STATE\n start = time.time()\n while True:\n # Download and write\n before = time.time()\n if (before - start) > self.download_time:\n config_pytomo.LOG.debug('\\nDownloaded %i seconds from video'\n 'stopping' % (before - start))\n break\n # read in bytes\n data_block = data.read(block_size)\n if not self.time_to_get_first_byte:\n first_byte_time = time.time()\n self.time_to_get_first_byte = first_byte_time - connection_time\n if (not self.encoding_rate\n and tries <= config_pytomo.MAX_NB_TRIES_ENCODING):\n self.compute_encoding_rate(meta_file_name)\n tries += 
1\n write_no_seek(meta_file, data_block)\n data_block_len = len(data_block)\n #config_pytomo.LOG.debug('\\ndata_block_len=%s' % data_block_len)\n if data_block_len == 0:\n config_pytomo.LOG.debug('\\nDowloaded complete video')\n break\n self._total_bytes += data_block_len\n self.update_without_tags()\n after = time.time()\n if not self.data_duration:\n try:\n self.data_duration = get_data_duration(meta_file_name)\n except ParseError, mes:\n config_pytomo.LOG.info('no data duration: %s' % mes)\n self.current_time = after - start\n time_difference = after - before\n self.update_state(time_difference)\n block_size = self.best_block_size(time_difference, data_block_len)\n instant_thp = (8e-3 * data_block_len / (time_difference)\n if (time_difference) != 0 else None)\n #config_pytomo.LOG.debug('max_instant_thp=%skb/s; instant_thp=%skb/s'\n # % (self.max_instant_thp, instant_thp))\n if time_difference > MAX_TH_MIN_UPDATE_TIME:\n self.max_instant_thp = max(self.max_instant_thp, instant_thp)\n if config_pytomo.LOG_LEVEL == config_pytomo.DEBUG:\n # Progress message\n progress_stats = {\n 'percent_str': self.calc_percent(self._total_bytes,\n self.data_len),\n 'data_len_str': self.format_bytes(self.data_len),\n 'eta_str': self.calc_eta(start, time.time(), self.data_len,\n self._total_bytes),\n 'speed_str': self.calc_speed(start, time.time(),\n self._total_bytes),\n # in order to avoid None convertion to float in\n # report_progress and still have information\n 'instant_thp': str(instant_thp),\n 'byte_counter': self._total_bytes,\n 'current_buffer': self.current_buffer,\n }\n self.report_progress(progress_stats)\n return after - start", "def process_image(self):\n pass", "def data(self) -> List[JpegImageFile]:\n return self._data", "def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n 
new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)", "def get_timepix_data_object(evt, src):\n o = evt.get(_psana.Timepix.DataV2, src)\n if o is not None: return o\n\n o = evt.get(_psana.Timepix.DataV1, src)\n if o is not None: return o\n\n return None", "def read(self):\n try:\n if self.Data.Sync.IsWritten == 1:\n\n if self._IsPauseOn:\n self.Data.Sync.IsPauseOn = 1\n else:\n self.Data.Sync.IsPauseOn = 0\n\n Width = self.Data.Image.ImageWidth\n Height = self.Data.Image.ImageHeight\n\n # Image = np.fromstring(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = np.frombuffer(self.Data.Image.Data, np.uint8, Width * Height * self.TARGET_IMAGE_CHANNELS)\n Image = Image.reshape(Height, Width, self.TARGET_IMAGE_CHANNELS)\n\n AspectRatio = Width / Height\n TargetWidth = int(self._TargetResolution[1] * AspectRatio)\n\n if TargetWidth >= self._TargetResolution[0]:\n if Width != TargetWidth or Height != self._TargetResolution[1]:\n Image = cv2.resize(Image, (TargetWidth, self._TargetResolution[1]))\n\n if TargetWidth != self._TargetResolution[0]:\n XStart = int(TargetWidth/2 - self._TargetResolution[0]/2)\n XStop = int(TargetWidth/2 + self._TargetResolution[0]/2)\n Image = Image[:, XStart:XStop]\n\n else:\n TargetHeight = int(self._TargetResolution[0]/AspectRatio)\n\n if Width != self._TargetResolution[0] or Height != TargetHeight:\n Image = cv2.resize(Image, (self._TargetResolution[1], TargetHeight))\n\n if TargetHeight != self._TargetResolution[1]:\n YStart = int(TargetHeight/2 - self._TargetResolution[1]/2)\n YStop = int(TargetHeight/2 + self._TargetResolution[1]/2)\n Image = Image[YStart:YStop, :]\n\n # Shall we convert this to 0 - 1 ?\n self._RawImage = Image\n self._Image = cv2.flip(Image, 0)\n\n # This one does not flip the image, but it rotate and crop !!\n # self._Image = np.array(cv2.flip(Image, 0)/255, dtype=np.float32)\n # self._Image = cv2.flip(Image, 0)\n\n\n # This one is flipped upside/down\n # print(\"Image from memory reshaped as WxH with Mean\", Width, Height, np.mean((self._Image), axis=(0, 1)))\n # self.store_to_file(self._Image)\n\n return True\n except:\n print(\"Unexpected error in Shared Memory Read\", sys.exc_info()[0])\n\n return False", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? 
back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def __init__(self):\n# This is the top container for all data. The gid is the global id (for a image).\n# Before calling convert most of the values are strings. Some additional\n# values are also calculated, see convert() for details. After calling\n# convert, most values are integers or floats where appropriat.\n # set through parser\n self.orientation = None\n self.tileheight = 0\n self.tilewidth = 0\n self.width = 0\n self.height = 0\n self.version = 0\n self.tile_sets = [] # TileSet\n self.layers = [] # WorldTileLayer <- what order? back to front (guessed)\n self.indexed_tiles = {} # {gid: (offsetx, offsety, image}\n self.object_groups = []\n self.properties = {} # {name: value}\n # additional info\n self.pixel_width = 0\n self.pixel_height = 0\n self.named_layers = {} # {name: layer}\n self.named_tile_sets = {} # {name: tile_set}\n self.map_file_name = \"\"\n self._image_loader = None", "def extract_date_info(object_key):\n pacific = pytz.timezone('America/Los_Angeles')\n first_parts = object_key.split(\"/\")\n capture_type = first_parts[4]\n last_part_idx = len(first_parts) - 1\n file_name = first_parts[last_part_idx]\n\n # now parse the date and time out of the file name\n second_parts = file_name.split(\"_\")\n last_part_idx = len(second_parts) - 1\n if capture_type == 'snap':\n date_time_string = second_parts[last_part_idx]\n if date_time_string.endswith('.jpg'):\n date_time_string = date_time_string[:-4]\n # FIN\n final_parts = date_time_string.split(\"-\")\n date_part = final_parts[0]\n time_part = final_parts[1]\n\n # FIN\n # FIN\n if capture_type == 'record':\n time_part = second_parts[last_part_idx]\n date_part = second_parts[(last_part_idx - 1)]\n if time_part.endswith('.mp4'):\n time_part = time_part[:-4]\n # FIN\n\n # parse out our date\n year = date_part[:4]\n date_part = date_part[4:]\n month = date_part[:2]\n day = date_part[2:]\n\n # parse out the time\n hour = time_part[:2]\n time_part = time_part[2:]\n seconds = time_part[2:]\n minutes = time_part[:2]\n\n if hour[:1] == '0':\n hour = hour[1:]\n if month[:1] == '0':\n month = month[1:]\n if day[:1] == '0':\n day = day[1:]\n\n this_date = datetime.datetime(int(year), int(month), int(day), int(hour),\n int(minutes), int(seconds), 0, pacific)\n return_object = {'isodate': this_date.isoformat(),\n 'year': year,\n 'month': month,\n 'day': day,\n 'hour': hour,\n 'minutes': minutes,\n 'seconds': seconds}\n return return_object", "def _pngdata(self, task, c, imgdata):\n ctype = c.getinfo(pycurl.CONTENT_TYPE)\n if not (ctype and ctype.startswith(\"image/\")):\n cherrypy.log(\"SCRAPER ERROR %s content type '%s' not an image, headers %s\" %\n (c.getinfo(pycurl.EFFECTIVE_URL), ctype, c.headers))\n return None\n elif ctype != 'image/png':\n debug(self._ID, 3, \"%s: converting image %s to png\", task.key, ctype)\n png = StringIO()\n PILImage.open(StringIO(imgdata)).save(png, \"PNG\")\n imgdata = png.getvalue()\n png.close()\n return imgdata", "def convert_timestamp_info(data):\n videos = data.get('video_files', [])\n images = data.get('image_files', [])\n\n # judge the exits of video and images\n upload_path = current_app.config['UPLOAD_FOLDER']\n storage_path = 
current_app.config['FILE_STORAGE_PATH']\n title = data.get('title')\n storage_dir = os.path.join(storage_path, title)\n\n pathlib.Path(storage_dir).mkdir(parents=True, exist_ok=True)\n\n for video in videos:\n video_name = video.get('name')\n video_upload_path = os.path.join(upload_path, video.get('num'))\n video_storage_path = os.path.join(storage_dir, video_name)\n shutil.move(video_upload_path, video_storage_path)\n video['file_path'] = os.path.join(title, video_name)\n del video['num']\n\n for image in images:\n image_name = image.get('name')\n image_upload_path = os.path.join(upload_path, image.get('num'))\n image_storage_path = os.path.join(storage_dir, image_name)\n shutil.move(image_upload_path, image_storage_path)\n image['file_path'] = os.path.join(title, image_name)\n del image['num']\n\n return data", "def receive_data(self):\n chunks = []\n bytes_recd = 0\n while bytes_recd < 8:\n #I'm reading my data in byte chunks\n try:\n chunk = self.sockfd.recv(min(8 - bytes_recd, 4))\n chunks.append(chunk)\n bytes_recd = bytes_recd + len(chunk)\n except:\n print(f'{self.ip} socket failed')\n break\n # if chunk == '':\n # raise RuntimeError(\"Socket connection broken\")\n\n stat_tuple = struct.unpack('L', chunks[0])\n data_tuple = struct.unpack('L', chunks[1])\n stat = stat_tuple[0]\n data = data_tuple[0]\n return stat, chunks[1]", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def timeCalc(image):\n telheader = astropy.io.fits.open(image)\n UT = telheader[0].header['UT']\n secs = float(UT[6:10])\n mins = float(UT[3:5])\n hours = float(UT[0:2])\n time = secs+mins*60.+hours*(60.*60.)\n\n return time", "def recv_img(self, filename):\n recv_data = b'' # packet of byte string data\n save_data = b'' # data to be saved to file\n img_not_recvd = True # flag to indicate if image has been recieved\n exp_seq = 0 # expected sequence number initially 0\n pkt = Packet()\n\n # get image data from client until all data received\n while True:\n try:\n if img_not_recvd:\n print(\"Client: Ready to receive image\", flush=True)\n # start = time()\n recv_data = self.client_socket.recv(self.pkt_size)\n\n pkt.pkt_unpack(recv_data)\n if pkt.seq_num != exp_seq or pkt.csum != pkt.checksum(pkt.seq_num, pkt.data):\n ack = Packet(exp_seq ^ 1, \"ACK\")\n else:\n save_data += pkt.data\n ack = Packet(exp_seq, \"ACK\")\n exp_seq ^= 1\n\n ack_pack = ack.pkt_pack()\n self.client_socket.sendto(ack_pack, self.server_addr)\n\n if img_not_recvd:\n img_not_recvd = False # img data began streaming if it reaches this point\n\n except socket.timeout:\n # if image not recieved yet, keep waiting\n if img_not_recvd:\n pass\n # image has been recieved\n else:\n # write data into a file\n # end = time()\n # print(\"Client: Time to receive image:\", end - start - 2)\n with open(filename, 'wb+') as server_img:\n server_img.write(save_data)\n print(\"Client: Received and saved image\", flush=True)\n break # exit loop", "def drag_data_received(self, widget, context, x, y, sel_data, info, time):\n if not sel_data:\n return\n #modern file managers provide URI_LIST. 
For Windows split sel_data.data\n files = sel_data.get_uris()\n for file in files:\n if win():\n clean_string = conv_to_unicode(\n file.replace('\\0',' ').replace(\"\\r\", \" \").strip(),\n None)\n else:\n clean_string = file\n protocol, site, mfile, j, k, l = urlparse(clean_string)\n if protocol == \"file\":\n name = url2pathname(mfile)\n mime = get_type(name)\n if not is_valid_type(mime):\n return\n photo = MediaObject()\n self.uistate.set_busy_cursor(True)\n photo.set_checksum(create_checksum(name))\n self.uistate.set_busy_cursor(False)\n base_dir = cuni(media_path(self.dbstate.db))\n if os.path.exists(base_dir):\n name = relative_path(name, base_dir)\n photo.set_path(name)\n photo.set_mime_type(mime)\n basename = os.path.basename(name)\n (root, ext) = os.path.splitext(basename)\n photo.set_description(root)\n with DbTxn(_(\"Drag Media Object\"), self.dbstate.db) as trans:\n self.dbstate.db.add_object(photo, trans)\n widget.emit_stop_by_name('drag_data_received')", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def get_data(self):\n data_str = get_cls_img(root=self.root, suffix=self.suffix)\n\n if not self.load_images:\n return data_str\n\n cls_img_data = dict.fromkeys(data_str.keys())\n for cls_ in data_str:\n temp = [0] * len(data_str[cls_])\n for i, img_name in enumerate(data_str[cls_]):\n img = _load_image(\n img_url=os.path.join(self.root, cls_, img_name),\n expand_dim=self.expand_dim\n )\n temp[i] = img\n cls_img_data[cls_] = list(temp)\n\n return cls_img_data", "def handle_req( self, req ):\n start_time_handle = time.time()\n stamp = req.stamp.data\n\n cv_image = None\n for i in range(3):\n cv_image, 
fail = self.pop_image_by_timestamp(stamp)\n if cv_image is None and fail == 0:\n rospy.logerr(\"Unable find image swarm loop too slow!\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n else:\n if fail == 1:\n print(\"Wait 0.02 sec for image come in and re find image\")\n rospy.sleep(0.02)\n cv_image = self.pop_image_by_timestamp(stamp)\n else:\n break\n\n if cv_image is None:\n rospy.logerr(\"Unable to find such image\")\n result = WholeImageDescriptorComputeTSResponse()\n return result\n\n\n # print( '[ProtoBufferModelImageDescriptor Handle Request#%5d] cv_image.shape' %(self.n_request_processed), cv_image.shape, '\\ta=', req.a, '\\tt=', stamp )\n if len(cv_image.shape)==2:\n # print 'Input dimensions are NxM but I am expecting it to be NxMxC, so np.expand_dims'\n cv_image = np.expand_dims( cv_image, -1 )\n elif len( cv_image.shape )==3:\n pass\n else:\n assert False\n\n\n assert (cv_image.shape[0] == self.im_rows and cv_image.shape[1] == self.im_cols and cv_image.shape[2] == self.im_chnls) , \\\n \"\\n[whole_image_descriptor_compute_server] Input shape of the image \\\n does not match with the allocated GPU memory. Expecting an input image of \\\n size %dx%dx%d, but received : %s\" %(self.im_rows, self.im_cols, self.im_chnls, str(cv_image.shape) )\n\n ## Compute Descriptor\n start_time = time.time()\n i__image = (np.expand_dims( cv_image.astype('float32'), 0 ) - 128.)*2.0/255. #[-1,1]\n print( 'Prepare in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n\n # u = self.model.predict( i__image )\n with self.sess.as_default():\n with self.sess.graph.as_default():\n # u = self.model.predict( i__image )\n u = self.sess.run(self.output_tensor, {'import/input_1:0': i__image})\n\n print( tcol.HEADER, 'Descriptor Computed in %4.4fms' %( 1000. *(time.time() - start_time) ), tcol.ENDC )\n # print( '\\tinput_image.shape=', cv_image.shape, )\n # print( '\\tinput_image dtype=', cv_image.dtype )\n # print( tcol.OKBLUE, '\\tinput image (to neuralnet) minmax=', np.min( i__image ), np.max( i__image ), tcol.ENDC )\n # print( '\\tdesc.shape=', u.shape, )\n # print( '\\tdesc minmax=', np.min( u ), np.max( u ), )\n # print( '\\tnorm=', np.linalg.norm(u[0]) )\n # print( '\\tmodel_type=', self.model_type )\n\n\n\n ## Populate output message\n result = WholeImageDescriptorComputeTSResponse()\n # result.desc = [ cv_image.shape[0], cv_image.shape[1] ]\n result.desc = u[0,:]\n result.model_type = self.model_type\n print( '[ProtoBufferModelImageDescriptor Handle Request] Callback returned in %4.4fms' %( 1000. *(time.time() - start_time_handle) ) )\n return result", "def _collectFrames(self):\n self._sources = sources = self._resolveFramePaths(self._info['sources'])\n self.logger.debug('Sources: %r', sources)\n\n frameDict = {'byFrame': {}, 'byAxes': {}, 'axesAllowed': True}\n numChecked = 0\n\n self._associatedImages = {}\n self._sourcePaths = {}\n self._channels = self._info.get('channels') or []\n\n absLargeImagePath = os.path.abspath(self._largeImagePath)\n computedWidth = computedHeight = 0\n self.tileWidth = self._info.get('tileWidth')\n self.tileHeight = self._info.get('tileHeight')\n self._nativeMagnification = {\n 'mm_x': self._info.get('scale', {}).get('mm_x') or None,\n 'mm_y': self._info.get('scale', {}).get('mm_y') or None,\n 'magnification': self._info.get('scale', {}).get('magnification') or None,\n }\n # Walk through the sources, opening at least the first two, and\n # construct a frame list. 
Each frame is a list of sources that affect\n # it along with the frame number from that source.\n lastSource = None\n for sourceIdx, source in enumerate(sources):\n path = source['path']\n if os.path.abspath(path) == absLargeImagePath:\n msg = 'Multi source specification is self-referential'\n raise TileSourceError(msg)\n similar = False\n if (lastSource and source['path'] == lastSource['path'] and\n source.get('params') == lastSource.get('params')):\n similar = True\n if not similar and (numChecked < 2 or not self._info.get('uniformSources')):\n # need kwargs of frame, style?\n ts = self._openSource(source)\n self.tileWidth = self.tileWidth or ts.tileWidth\n self.tileHeight = self.tileHeight or ts.tileHeight\n if not numChecked:\n tsMag = ts.getNativeMagnification()\n for key in self._nativeMagnification:\n self._nativeMagnification[key] = (\n self._nativeMagnification[key] or tsMag.get(key))\n numChecked += 1\n tsMeta = ts.getMetadata()\n if 'bands' in tsMeta:\n if not hasattr(self, '_bands'):\n self._bands = {}\n self._bands.update(tsMeta['bands'])\n lastSource = source\n bbox = self._sourceBoundingBox(source, tsMeta['sizeX'], tsMeta['sizeY'])\n computedWidth = max(computedWidth, int(math.ceil(bbox['right'])))\n computedHeight = max(computedHeight, int(math.ceil(bbox['bottom'])))\n # Record this path\n if path not in self._sourcePaths:\n self._sourcePaths[path] = {\n 'frames': set(),\n 'sourcenum': set(),\n }\n # collect associated images\n for basekey in ts.getAssociatedImagesList():\n key = basekey\n keyidx = 0\n while key in self._associatedImages:\n keyidx += 1\n key = '%s-%d' % (basekey, keyidx)\n self._associatedImages[key] = {\n 'sourcenum': sourceIdx,\n 'key': key,\n }\n source['metadata'] = tsMeta\n source['bbox'] = bbox\n self._sourcePaths[path]['sourcenum'].add(sourceIdx)\n # process metadata to determine what frames are used, etc.\n self._addSourceToFrames(tsMeta, source, sourceIdx, frameDict)\n # Check frameDict and create frame record\n self._frames = self._frameDictToFrames(frameDict)\n self.tileWidth = min(max(self.tileWidth, self._minTileSize), self._maxTileSize)\n self.tileHeight = min(max(self.tileHeight, self._minTileSize), self._maxTileSize)\n self.sizeX = self._info.get('width') or computedWidth\n self.sizeY = self._info.get('height') or computedHeight\n self.levels = int(max(1, math.ceil(math.log(\n max(self.sizeX / self.tileWidth, self.sizeY / self.tileHeight)) / math.log(2)) + 1))", "def _extract_features(self, times):\n times[1] = time()\n data = {n:self._extract_feature(f) for (n,f) in self.features.items()} \n times[2] = time()\n return (data, times, os.getpid())", "def add_rendered(self, image_id, data, width, height, crop):\n\t\ttry:\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tvalidation.required(data, 'data')\n\t\t\tif width:\n\t\t\t\twidth = validation.cast_integer(width, 'width')\n\t\t\t\theight = validation.cast_integer(height, 'height')\n\t\t\t\tcrop = validation.cast_boolean(crop, 'crop')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\t@stack\n\t\tdef store(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpath = result[1]\n\t\t\tself.log.debug(\"writing to path: %s\" % path)\n\t\t\treturn self._write_binary(\"%s.jpg\" % path, data, True)\n\n\t\t@stack\n\t\tdef handle_nodes(result, media_id, owner_username):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\t\t\tnodes = result[1]\n\t\t\tself.log.debug(\"got nodes %s 
from locate_media()\" % pformat(nodes))\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\tself.log.debug(\"storing media %s on node %s\" % (media_id, n))\n\t\t\t\td2 = self._make_media_path(media_id, n, owner_username, width, height, crop)\n\t\t\t\td2.addCallback(store)\n\t\t\t\tdl.append(d2)\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: data)\n\t\t\treturn dList\n\n\t\t@stack\n\t\tdef handle_media_info(result):\n\t\t\tif result[0] != 0:\n\t\t\t\treturn result\n\n\t\t\tmedia_id = result[1]['media_id']\n\t\t\towner_username = result[1]['owner_username']\n\n\t\t\td2 = self._locate_media(media_id)\n\t\t\td2.addCallback(handle_nodes, media_id, owner_username)\n\t\t\td2.addCallback(lambda _: (0, _))\n\t\t\treturn d2\n\n\t\td = self.app.api.images.get_media_owner_id(image_id)\n\t\td.addCallback(handle_media_info)\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def fetch_data(data_annotations, key):\n def _fetch_data(data, a_key):\n # Get the path and read the waveform\n wav_file_path = data[a_key]['wav_path']\n out_x, out_fs = io.AudioIO.wavRead(wav_file_path, mono=True)\n # Generate time-domain labels\n pointers_in = data[a_key]['start_time']\n pointers_out = data[a_key]['stop_time']\n if not len(pointers_in) == len(pointers_out):\n raise AttributeError(\"Unequal number of pointers. Problems may occur...\")\n out_y = np.zeros(out_x.shape)\n for p_indx in range(len(pointers_in)):\n c_pin = int(np.floor(pointers_in[p_indx] * out_fs))\n c_pout = int(np.floor(pointers_out[p_indx] * out_fs))\n out_y[c_pin:c_pout] = 1.\n\n return out_x, out_y, out_fs\n\n if type(key) == list:\n print('Number of key entries: ' + str(len(key)))\n print('Fetching: ' + key[0])\n x, y, fs = _fetch_data(data_annotations, key[0])\n for key_item in key[1:]:\n print('Fetching: ' + key_item)\n x_b, y_b, _ = _fetch_data(data_annotations, key_item)\n x = np.hstack((x, x_b))\n y = np.hstack((y, y_b))\n else:\n x, y, fs = _fetch_data(data_annotations, key)\n\n return x, y, fs", "def image_data_info(page):\n xObject = page['/Resources']['/XObject'].getObject()\n\n for obj_key in xObject:\n obj = xObject[obj_key]\n if obj['/Subtype'] == '/Image':\n width, height = (obj['/Width'], obj['/Height'])\n num_bytes = len(obj._data)\n density = num_bytes * 1.0 / (width * height)\n return {'width': width, 'height': height, 'size': num_bytes, 'density': density}\n\n return None", "def parse_data(self):\n\n msg = self.xml['dom'].childNodes[0]\n self.data = xml_to_dicts(msg, False)\n\n # Get some metadata together\n self.id = \"%s:%s\" % (self.data['src']['name']['#cdata'], self.data['src']['id']['#cdata'])\n self.temp = self.data['tmpr']['#cdata']\n self.watts = self.data['ch1']['watts']['#cdata']\n\n # Time - CurrentCost and local\n self.date = {}\n self.date['cc'] = [ int(self.data['date'][k]['#cdata']) for k in ('dsb','hr','min','sec') ]\n self.date['now'] = localtime()", "def image(self, state):\n valid_time = _to_datetime(state.valid_time)\n\n # 15 minute/1 hour slice of data?\n window = dt.timedelta(minutes=60) # 1 hour window\n paths = self.locator.find_period(valid_time, window)\n frame = self.loader.load(paths)\n frame = self.select_date(frame, valid_time, window)\n\n # Filter intra-cloud/cloud-ground rows\n if \"intra-cloud\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"IC\"]\n elif \"cloud-ground\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"CG\"]\n\n # EarthNetworks validity box (not needed if tiling algorithm)\n longitude_range = (26, 40)\n 
latitude_range = (-12, 4)\n x_range, y_range = geo.web_mercator(longitude_range, latitude_range)\n\n x, y = geo.web_mercator(frame[\"longitude\"], frame[\"latitude\"])\n frame[\"x\"] = x\n frame[\"y\"] = y\n pixels = 256\n canvas = datashader.Canvas(\n plot_width=pixels,\n plot_height=pixels,\n x_range=x_range,\n y_range=y_range,\n )\n\n if \"density\" in state.variable.lower():\n # N flashes per pixel\n agg = canvas.points(frame, \"x\", \"y\", datashader.count())\n else:\n frame[\"since_flash\"] = self.since_flash(frame[\"date\"], valid_time)\n agg = canvas.points(frame, \"x\", \"y\", datashader.max(\"since_flash\"))\n\n # Note: DataArray objects are not JSON serializable, .values is the\n # same data cast as a numpy array\n x = agg.x.values.min()\n y = agg.y.values.min()\n dw = agg.x.values.max() - x\n dh = agg.y.values.max() - y\n image = np.ma.masked_array(\n agg.values.astype(np.float), mask=np.isnan(agg.values)\n )\n if \"density\" in state.variable.lower():\n image[image == 0] = np.ma.masked # Remove pixels with no data\n\n # Update color_mapper\n color_mapper = self.color_mappers[\"image\"]\n if \"density\" in state.variable.lower():\n color_mapper.palette = bokeh.palettes.all_palettes[\"Spectral\"][8]\n color_mapper.low = 0\n color_mapper.high = agg.values.max()\n else:\n color_mapper.palette = bokeh.palettes.all_palettes[\"RdGy\"][8]\n color_mapper.low = 0\n color_mapper.high = 60 * 60 # 1 hour\n\n # Update tooltips\n for hover_tool in self.hover_tools[\"image\"]:\n hover_tool.tooltips = self.tooltips(state.variable)\n hover_tool.formatters = self.formatters(state.variable)\n\n if \"density\" in state.variable.lower():\n units = \"events\"\n else:\n units = \"seconds\"\n\n data = {\n \"x\": [x],\n \"y\": [y],\n \"dw\": [dw],\n \"dh\": [dh],\n \"image\": [image],\n }\n meta_data = {\n \"variable\": [state.variable],\n \"date\": [valid_time],\n \"units\": [units],\n \"window\": [window.total_seconds()],\n }\n data.update(meta_data)\n self.sources[\"image\"].data = data", "def getimage(self):", "def __init__(self, data):\n\t\tself.protocol_version, self.le_state, self.playback_state, \\\n\t\t self.source, self.le_flags, self.playback_flags, \\\n\t\t self.source_flags, self.fullness, self.point_rate, \\\n\t\t self.point_count = \\\n\t\t\tstruct.unpack(\"<BBBBHHHHII\", data)", "def read(self):\n\n # ret, image = self.video.read()\n (self.grabbed, self.frame) = self.cap.read()\n image = self.frame\n\n if image is not None:\n \"\"\"Update FPS, and incode received frame. 
\"\"\"\n self.fps.update()\n # TODO: add self.fps.fps() to image, if flagged raised.\n\n # We are using Motion JPEG, but OpenCV defaults to cap raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n\n # display a piece of text to the frame (so we can benchmark\n # fairly against the fast method)\n self.fps.stop()\n cv2.putText(image, \"FPS (simple): {:.2f}\".format(self.fps.fps()), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n self.frame = image.copy()\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n self.logger.debug(\"in 'get_frame', video.read not success\")", "def get_image(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT data FROM image WHERE id = '{self.image_id}'\")\n image = cursor.fetchone()\n cursor.close()\n return b64encode(image['data']).decode('utf-8')", "def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1", "def get_info(self):\n\n return (self.source,\n self.rate,\n self.numChannels,\n self.totalSamples,\n self.duration,\n self.dataType)", "def get_data_from_name(image_name):\n nome = image_name.split(\".\")[0]\n nome_recebido = list(nome)\n ano = ''.join(nome_recebido[:4])\n mes = ''.join(nome_recebido[4:6])\n dia = ''.join(nome_recebido[6:8])\n hora = ''.join(nome_recebido[8:10])\n minuto = ''.join(nome_recebido[10:12])\n segundo = ''.join(nome_recebido[12:14])\n codigo = ''.join(nome_recebido[14:24])\n certeza = ''.join(nome_recebido[24:27])\n placa = ''.join(nome_recebido[27:34])\n posicao = ''.join(nome_recebido[34])\n classificao = ''.join(nome_recebido[35:37])\n velocidade = ''.join(nome_recebido[37:40])\n comprimento = ''.join(nome_recebido[40:43])\n sequencial = ''.join(nome_recebido[43:])\n\n return [ano, mes, dia, hora, minuto, segundo, codigo, certeza, placa, posicao, classificao, velocidade, comprimento,\n sequencial]", "def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl", "def step(inputs, state, outputs):\n if not inputs['time']['ena']:\n return\n\n (is_ok, outputs['img']['buff']) = state['cap'].read() # OpenCV |- BGR\n outputs['img']['ena'] = is_ok\n outputs['img']['ts'] = inputs['time']['ts']\n\n print('CAP: ' + str(is_ok))", "def udp_frame(self, img, client):\n compress_img = cv2.imencode('.jpg', img)[1]\n dat = compress_img.tostring()\n size = len(dat)\n count = math.ceil(size / self.MAX_IMAGE_DGRAM)\n array_pos_start = 0\n while count:\n array_pos_end = min(size, array_pos_start + self.MAX_IMAGE_DGRAM)\n self.send_sock.sendto(struct.pack(\"B\", count) +\n dat[array_pos_start:array_pos_end],\n client\n )\n array_pos_start = array_pos_end\n count -= 1", "def udp_frame(self, img):\r\n if img is not None:\r\n compress_img = cv2.imencode('.jpg', img)[1]\r\n dat = compress_img.tobytes()\r\n size = len(dat)\r\n count = math.ceil(size / self.MAX_IMAGE_DGRAM)\r\n array_pos_start = 0\r\n while count:\r\n array_pos_end = min(size, array_pos_start + self.MAX_IMAGE_DGRAM)\r\n self.s.sendto(struct.pack(\"B\", count) +\r\n dat[array_pos_start:array_pos_end],\r\n 
(self.addr, self.port)\r\n )\r\n array_pos_start = array_pos_end\r\n count -= 1", "def _get_data(self):\n raise NotImplementedError()", "def _parseData(self, payload):\n out=[]\n bytesParsed = 0\n while bytesParsed < len(payload):\n\n #check for the extended Code Level, code and length\n #count the number of EXCODE_BYTE\n #extendedCodeLevel = sum([1 for x in data if x == EXCODE_BYTE] )\n #bytesParsed += extendedCodeLevel\n\n #identify the length of the expected bytes in the payload\n code = payload[bytesParsed]\n bytesParsed +=1\n if code > 0x7F:\n # multi-byte code, length > 1\n length = payload[bytesParsed]\n bytesParsed +=1\n else:\n length = 1\n\n if code == SENSOR_STATUS:\n # value of 0==no contact, 200==contact\n #print \"leadoff: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'leadoff': payload[bytesParsed] } )\n bytesParsed +=1\n\n elif code == HEART_RATE:\n #print \"HR: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'HR': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == CONFIG_BYTE:\n #print \"config: %i\" % payload[bytesParsed]\n out.append( {'timestamp': self.curtime, 'config': payload[bytesParsed:] } )\n bytesParsed +=1\n\n elif code == RAW_ECG:\n # raw value is between -32768 and 32767, in twos compliment form\n # if the raw value is higher than 32768, it should be rolled around to allow for negative values\n raw = payload[bytesParsed]*256 + payload[bytesParsed]\n if raw >= 32768: \n raw = raw - 65536\n #print \"ecg: %i\" % ecg\n\n # create the timestamp on each ECG sample, starting from the first\n if self.starttime is None:\n self.starttime = time.time()\n self.curtime = self.starttime\n else:\n self.curtime = self.curtime + 1./self.Fs\n\n out.append( {'timestamp': self.curtime, 'ecg_raw': raw } )\n bytesParsed += length\n\n elif code == DEBUG_1:\n #print \"debug1: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug1': payload[bytesParsed:] } )\n bytesParsed += length\n\n elif code == DEBUG_2:\n #print \"debug2: \" + str(payload[bytesParsed:]).strip('[]')\n out.append( {'timestamp': self.curtime, 'debug2': payload[bytesParsed:] } )\n bytesParsed += length\n\n else:\n print \"unknown code: %i\" % code\n\n return out", "def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None", "def save_mem_load(self):\n if len(self.get_data_shape())==4 and self._img:\n data = np.zeros(self.get_data_shape())\n self._data = np.rot90(data)\n self._loaded_time_list = [0]\n self._data[..., 0] = np.rot90(self._img.dataobj[..., 0])\n else:\n self._loaded_time_list = [0]\n data = self._img.get_data(caching='unchanged')\n self._data = np.rot90(data)", "def add_modified(self, image_id, media_type, data):\n\t\ttry:\n\t\t\timage_id = validation.cast_integer(image_id, 'image_id')\n\t\t\tmedia_type = validation.cast_integer(media_type, 'media_type')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tif isinstance(data, xmlrpclib.Binary):\n\t\t\tdata = data.data # looks strange, but that's how xmlrpc works :)\n\n\t\t@stack\n\t\tdef store(result):\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tpath = result[1]\n\t\t\tself.log.debug(\"writing to path: %s\" % path)\n\t\t\treturn self._write_binary(\"%s.jpg\" % path, data, True)\n\n\t\t@stack\n\t\tdef handle_nodes(result, media_id, owner_username):\n\t\t\t\"\"\"\n\t\t\tI don't know what the hell this does. 
looks like nothing.\n\n\t\t\t@return: Unknown\n\t\t\t@rtype: Unknown\n\n\t\t\tThe above comment was added by Clint.\n\t\t\tI left it here to illustrate something:\n\n\t\t\t\t\tClint's full of shit.\n\n\t\t\tV\n\t\t\t\"\"\"\n\t\t\tif result[0] != 0:\n\t\t\t\traise errors.APIError(result[1])\n\n\t\t\tnodes = result[1]\n\t\t\tdl = []\n\t\t\tfor n in nodes:\n\t\t\t\td2 = self._make_media_path(media_id, n, owner_username)\n\t\t\t\td2.addCallback(store)\n\t\t\t\td2.addCallback(lambda _: self.clear_renders(media_id, owner_username, n))\n\t\t\t\tdl.append(d2)\n\t\t\tdList = DeferredList(dl)\n\t\t\tdList.addCallback(lambda _: \"success\")\n\t\t\treturn dList\n\n\t\t@stack\n\t\tdef handle_media_info(result):\n\t\t\tif result[0] != 0:\n\t\t\t\treturn result\n\n\t\t\tmedia_id = result[1]['media_id']\n\t\t\towner_username = result[1]['owner_username']\n\t\t\td3 = self._locate_media(media_id)\n\t\t\td3.addCallback(handle_nodes, media_id, owner_username)\n\t\t\td3.addCallback(lambda _: (0, _))\n\t\t\treturn d3\n\n\t\td = self.app.api.images.get_media_owner_id(image_id)\n\t\td.addCallback(handle_media_info)\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def _retrieveCachedData(self):", "def extract_data(filename,norm_shift=False,norm_scale=True,tag=1):\n print('Extracting',filename)\n data = extractdb_images(filename,tag)\n\n if norm_shift:\n data = data-(PIXEL_DEPTH/2.0)\n if norm_scale:\n data = data/PIXEL_DEPTH\n\n num = data.shape[0]\n data = np.reshape(data,[num,-1])\n # print(data.shape) #(2304,4096) #(576,4096)\n\n return data", "def print_image_info(self):\r\n\r\n maxt = np.max(self.times)\r\n\r\n print (\" Duration of Image Stack: %9.3f s (%8.3f min) period = %8.3f s\" % (maxt, maxt/60.0, self.period))\r\n\r\n print (' Image shape: ', self.imageData.shape)\r\n\r\n print (' nFrames: %d framerate: %9.3f\\n' % (self.nFrames, self.framerate))", "def extract_metadata(self, msg, payload, text, part):\n\n if part.get_content_maintype() == \"image\":\n\n name = part.get_param(\"name\")\n subtype = part.get_content_subtype()\n\n self._add_name(msg, name)\n self._update_counts(msg, subtype, by=1)\n self._save_stats(msg, part.get_payload(decode=True), subtype)", "def store_img_infos(self, msg):\n # msg is technically a ConsumerRecord that is a collections.namedtuple, see:\n # https://github.com/dpkp/kafka-python/blob/master/kafka/consumer/fetcher.py#L30\n strk = str(msg['sha1'])\n self.dict_sha1_infos[strk] = dict()\n for key in msg:\n # dumps json of 'img_info'\n # We actually need that only for DIG...\n if key == \"img_info\":\n self.dict_sha1_infos[strk][key] = json.dumps(msg[key])\n else:\n # discard 'img_buffer' (if it exists?...), and 'sha1'\n # if k != \"img_buffer\" and k != \"sha1\":\n # self.dict_sha1_infos[strk][k] = msg[k]\n # discard 'sha1'\n if key != \"sha1\":\n self.dict_sha1_infos[strk][key] = msg[key]", "def get_image_info_struct(nimage, path_len,\n image_id_len=None,\n wcs_len=None,\n ext_len=None,\n extra_dtype=None):\n dt = get_image_info_dtype(\n path_len,\n image_id_len=image_id_len,\n wcs_len=wcs_len,\n ext_len=ext_len,\n extra_dtype=extra_dtype,\n )\n\n data = np.zeros(nimage, dtype=dt)\n\n data['scale'] = 1.0\n\n return data", "def 
loadData(catalog):\n delta_time = -1.0\n delta_memory = -1.0\n\n tracemalloc.start()\n start_time = getTime()\n start_memory = getMemory()\n\n loadVideos(catalog)\n loadVideosCategory(catalog)\n\n stop_memory = getMemory()\n stop_time = getTime()\n tracemalloc.stop()\n\n delta_time = stop_time - start_time\n delta_memory = deltaMemory(start_memory, stop_memory)\n\n return delta_time, delta_memory", "def fetch(self,image_type):\n if image_type == self.IMAGE:\n return self.image\n elif image_type == self.FRAME:\n return self.frame\n elif image_type ==self.DIFFERENCE:\n return self.diff_frame\n elif image_type == self.ABS_DIFFERENCE:\n return self.abs_diff_frame\n else:\n print('Error defining frame to be fetched!!!')", "def camera_image_callback(self, ros_data):\n self.last_call_back_time = rospy.get_time()\n\n # self.logger.info(\"Got image\")\n if self.lastCameraInfo is not None:\n # Collect latest ros_data\n self.image_queue.put((ros_data, self.lastCameraInfo, self.seq_stamper), block=True)\n self.seq_stamper += 1\n\n # self.logger.warning(str(len(multiprocessing.active_children())))\n else:\n self.logger.warning(\"No camera info\")", "def image_test_case(img, expected_results, info_string):\n global passed_count, failed_count\n\n path = TEST_IMGS + img\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nTesting image handling of {}\".format(path))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n with open(path, 'rb') as f:\n img_bytes = f.read()\n\n sock.send(START)\n sock.send(GPS)\n sock.send(b'51.5138')\n sock.send(LONG)\n sock.send(b'-0.09847899999999754')\n sock.send(SOF)\n sock.send(img_bytes)\n sock.send(END_MESSAGE)\n\n response_1 = sock.recv(4)\n response_2 = sock.recv(4)\n responses = [response_1, response_2]\n\n for expected in expected_results:\n if expected not in responses:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}. 
Received {}.\".format(\n expected_results, responses))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def get_observation(self, sensor_data):\n image = post_process_image(sensor_data['birdview'][1], normalized = False, grayscale = False)\n\n if self.prev_image_0 is None:\n self.prev_image_0 = image\n self.prev_image_1 = self.prev_image_0\n self.prev_image_2 = self.prev_image_1\n\n images = image\n\n if self.frame_stack >= 2:\n images = np.concatenate([self.prev_image_0, images], axis=2)\n if self.frame_stack >= 3 and images is not None:\n images = np.concatenate([self.prev_image_1, images], axis=2)\n if self.frame_stack >= 4 and images is not None:\n images = np.concatenate([self.prev_image_2, images], axis=2)\n\n self.prev_image_2 = self.prev_image_1\n self.prev_image_1 = self.prev_image_0\n self.prev_image_0 = image\n\n return images, {}", "def mean_time(self) -> 'ImageCollection':\n\n process_id = 'mean_time'\n\n args = {\n 'imagery': self.graph\n }\n\n return self.graph_add_process(process_id, args)", "def mean_time(self) -> 'ImageCollection':\n\n process_id = 'mean_time'\n\n args = {\n 'imagery': self.graph\n }\n\n return self.graph_add_process(process_id, args)", "def getImageInfo(self, path, timestamp=None):\n\n key = self.generateCacheKey(path, timestamp)\n if not key in self.cache:\n info = self.fetchInfo(path)\n self.cache[key] = info\n\n return self.cache[key]", "def get_message(message_type, source, data=None):\n if message_type == 'image':\n header = (MESSAGE_HEADER % source)\n body = (MESSAGE_BODY % (data['location'], data['width'], data['height']))\n urllib.request.urlretrieve(data['last_media'], 'tmp.jpg')\n return header + body\n else:\n header = (MESSAGE_HEADER % source)\n return header + MESSAGE_FOOTER", "def send_image_frame_REP_watcher(self, text, image):\n\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_image(text, image)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_image in REP_watcher function.\")\n self. 
fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def run(self):\n\n im = None\n while im == None:\n im = self.vid_mem_reader.get_latest_image()\n if im == None:\n print \"not receiving images yet...\"\n time.sleep(0.2)\n\n #Wait for video source to be ready:\n #TODO: Shoud use vidmemreader, but this one never seem to return a resolution (at time of writing):\n #res = self.vid_mem_reader.get_resolution()\n \n #TODO: This should work, but it doesn't because OpenCV keeps on complaining about that im is not a IPL image \n #(while if you print it, it seems to be a IPL image).\n #print im\n size = cv.GetSize(im[0])\n #print size\n self.res = ({'width':size[0], 'height':size[1]})\n res = self.res\n\n self.transformer = util.speed_angle.SpeedAngle(None, res['width'], res['height'])\n \n while True:\n self.__ticker.tick()\n start_time = time.time()\n img = self.get_new_image()\n ''' Parallel Process Inside this module\n \n im = np.asarray(img[:,:])\n time_spent = time.time() - start_time\n \n #Parallel process\n \n self.parallel_rotate_image(im)\n self.logger.debug(\"Set one finished\")\n \n print \"Image Length: \", self.rotatedImages\n for img in self.rotatedImages:\n self.get_faces(img[0])\n self.update()\n \n self.rotatedImages = []\n '''\n im = np.asarray(img[:,:])\n \n image = self.rotate_image( im, [self.rotation])\n self.get_faces(image)\n self.update()\n\n #TODO: To be removed and or configurable:\n directory = \"/tmp/emergency/\"\n if not os.path.exists(directory):\n os.makedirs(directory) \n try:\n cv.SaveImage(directory + \"image.png\", image)\n except:\n print \"ERROR: Could not write image to /tmp/emergency/\"", "def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)", "def getDataFromImage(self, open_url, img_info=None):\n\n img_parser = ImageFile.Parser()\n\n for block_buf in self.readImageDataPerByte(urllib2.urlopen(open_url.geturl())):\n img_parser.feed(block_buf)\n if img_parser.image:\n img_info.width, img_info.height = img_parser.image.size\n return img_info", "def readSrc_byTime(self):\n for msg in 
self.srcFile:\n msg = msg[0:-1] # remove \\n at the end of the string\n if '%%' in msg:\n self.srcHeader.append(msg)\n else:\n msg = msg.split()\n time = float(msg[0])\n meas = msg[1]\n sens = msg[2]\n valu = msg[3]\n if time not in self.srcData: # none from this time yet\n self.srcData[time] = {}\n if sens not in self.srcData[time]: # none at this time from this gSensor\n self.srcData[time][sens] = {}\n self.srcData[time][sens][meas] = valu # assume only one message per meas from sens at a time" ]
[ "0.5853295", "0.5850195", "0.5799188", "0.5721936", "0.57214046", "0.56789905", "0.562323", "0.5574254", "0.55553085", "0.5534616", "0.54999393", "0.5493341", "0.5476053", "0.5475871", "0.54282975", "0.5408226", "0.54078424", "0.53759134", "0.53672683", "0.534906", "0.53287125", "0.532066", "0.5290919", "0.5288634", "0.5285877", "0.5252423", "0.5251451", "0.52393496", "0.5229116", "0.5208581", "0.5201241", "0.5185292", "0.51421237", "0.51363045", "0.5116923", "0.5116411", "0.51141465", "0.5086795", "0.50732934", "0.5065092", "0.50616044", "0.5059579", "0.5052462", "0.50458586", "0.5045634", "0.5045634", "0.50424755", "0.50413764", "0.50357896", "0.5035114", "0.50323427", "0.5032188", "0.50316095", "0.5027173", "0.50259686", "0.5018123", "0.50169647", "0.501438", "0.50092137", "0.5006554", "0.5001444", "0.49709412", "0.49676093", "0.49555466", "0.49535707", "0.49510852", "0.49488488", "0.49469283", "0.49448115", "0.49274102", "0.4923144", "0.49161437", "0.49151635", "0.49129766", "0.49115837", "0.490515", "0.4892584", "0.48905578", "0.48891807", "0.48851967", "0.48806995", "0.4878701", "0.4877299", "0.48751014", "0.48746997", "0.48727852", "0.4863568", "0.48633227", "0.4863152", "0.48610526", "0.4860716", "0.4858822", "0.48582572", "0.48582572", "0.485817", "0.485812", "0.48567918", "0.48555574", "0.48540437", "0.48501045", "0.48497063" ]
0.0
-1
Encode image for Dash Application
def encodedImage(imageFile):
    imageFile = "".join([METRICS_PATH, imageFile])
    encoded = base64.b64encode(open(imageFile, 'rb').read())
    return 'data:image/jpg;base64,{}'.format(encoded.decode())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(image):\n from encoder import launch\n launch(image)", "def encode(self, image) -> bytes:\n raise NotImplementedError()", "def prepare_output(image: np.ndarray) -> str:\n response_image = Image.fromarray(np.uint8(image * 255))\n buffer = BytesIO()\n response_image.save(buffer, \"PNG\")\n encoded = base64.b64encode(buffer.getvalue())\n return \"data:image/png;base64,\" + str(encoded)[2:-1]", "def encode_image(self, image):\n\t\t# Encode in Base64 and print encoded string for copying\n\t\twith open(image, 'rb') as image:\n\t\t\tprint(\"[+] Image has been encoded. Copy this string:\\n\")\n\t\t\timg_64 = '<img src=\"data:image/png;base64,{}\">'.format(base64.b64encode(image.read()).decode('ascii'))\n\t\t\tprint(img_64 + \"\\n\")\n\t\t\tprint(\"[+] End of encoded string.\")", "def convertImage(img):\n return '\\\\includegraphicsdata{%s}' % \":\".join([\n 'data',\n img.contentType,\n \"base64,%s\" % img.data.encode(\"base64\").replace(\"\\n\", \"\"),\n ])", "def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64", "def encode_image(image):\n return base64.b64encode(image).decode('ascii')", "def encode_images(self, images):\n # todo\n pass", "def embed_image_pred(image):\n image_pil2 = Image.fromarray((255 * image).astype('uint8'))\n #image_pil2 = image_pil.resize((256, 256))\n string_buf2 = StringIO.StringIO()\n image_pil2.save(string_buf2, format='png')\n data = string_buf2.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/png;base64,' + data", "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def generate_image(self):\n pass", "def encode(output_image_path):\n with open(output_image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode('utf-8')\n return encoded_string", "def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())", "def formatImage(imgData):\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))", "def encode(pixels):\n # save the image to a bytes buffer\n buffered = BytesIO()\n image = Image.fromarray(pixels.astype('uint8'))\n image = image.convert('RGB')\n image.save(buffered, format=\"PNG\")\n\n # decode the bytes as a string\n img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')\n\n return img_str", "def encode_decode(self, img, img_metas):\n pass", "def save_emotion_image(image):\n\n image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)\n pil_image = draw_rects(image, detect_faces(image))\n image_file = 'temp/' + uuid.uuid4().hex + '.jpg'\n pil_image.save('app/static/' + image_file)\n\n return image_file", "def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')", "def encode(img):\r\n msg=getInput(img); #Get User input\r\n ints=stringToInts(msg); #Convert all characters in the input to their ascii values\r\n ImageUtilities.setPixelAlphasFromIntsRandom(img,ints); #For every ascii value set a different pixel's alpha value to that ascii value.\r\n return img;", "def _encode_image(image_array, fmt):\n from PIL import Image # pylint: disable=g-import-not-at-top\n pil_image = Image.fromarray(image_array)\n image_io = io.BytesIO()\n pil_image.save(image_io, format=fmt)\n return 
image_io.getvalue()", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def EncoderImage(config):\n\n # data_name, img_dim, embed_size, finetune=False,\n # cnn_type='vgg19', use_abs=False, no_imgnorm=False):\n\n embed_size = config['model']['embed-size']\n order_embeddings = config['training']['measure'] == 'order'\n if config['image-model']['name'] == 'bottomup':\n transformer_layers = config['image-model']['transformer-layers']\n pos_encoding = config['image-model']['pos-encoding']\n visual_feat_dim = config['image-model']['feat-dim']\n dropout = config['image-model']['dropout']\n img_enc = TransformerPostProcessing(transformer_layers, visual_feat_dim, embed_size, n_head=4, aggr='mean', pos_encoding=pos_encoding, dropout=dropout, order_embeddings=order_embeddings)\n else:\n img_enc = None\n\n return img_enc", "def export_image(self, params: Dict[str, str]) -> bytes:\n response = requests.post(self.export_url, data=params)\n self.export_output = response.content\n return self.export_output", "def send_image(image: PIL.Image.Image):\n import base64\n import io\n\n image = image.convert(\"RGB\")\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n image_b64 = base64.b64encode(buffer.getvalue())\n send(\"image\", image_b64.decode(\"utf-8\"))", "def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])", "def write(self, image):\n raise NotImplementedError()", "def convert_canvas_to_img_js():\n return None", "def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")", "def embed_image_html(image, type):\n if type == 'dehaze':\n image_pil = Image.fromarray((image).astype('uint8'))\n elif type == 'style_transfer':\n image_pil = Image.fromarray((image).astype('uint8'))\n else:\n image_pil = Image.fromarray((255 * image).astype('uint8'))\n if sys.version_info.major == 2:\n string_buf=StringIO.StringIO()\n image_pil.save(string_buf, format='png')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n else:\n _buf = BytesIO()\n image_pil.save(_buf, format='png')\n _buf.seek(0)\n b64_buf = base64.b64encode(_buf.getvalue())\n string_buf = StringIO(b64_buf.decode('utf-8', errors='replace'))\n data =string_buf.getvalue().replace('\\n', '')\n\n return 'data:image/png;base64,' + data", "def save_image(self):\n self.compressed_image_id = str(uuid.uuid4().hex)\n plot.imsave(\n str(\n self.compressed_image_id + \"{}\").format(\n \".png\"), self.compressed_image)\n\n if self.verbose:\n print(\n \"Compressed image saved at \" + (\n str(self.compressed_image_id + \"{}\").format(\".png\")))", "def _build_final_image(self, image):\n raise NotImplementedError", "def encode_image(text_to_encode, template_image=\"images/template_image.jpg\"):\n raw_image = Image.open(template_image)\n hidden_message = write_text(text_to_encode,raw_image.size)\n\n x_size = raw_image.size[0]\n y_size = raw_image.size[1]\n\n red_channel = raw_image.split()[0]\n green_channel = raw_image.split()[1]\n blue_channel = raw_image.split()[2]\n # get all channels from raw_image\n encoded_image = Image.new(\"RGB\", raw_image.size)\n\n for x in range(x_size):\n for y in range(y_size):\n hidden_pixel = hidden_message.getpixel((x, y))\n\n encoded_red_pixel = red_channel.getpixel((x, y))\n if (hidden_pixel == (255, 255, 255)):\n red_channel_pixel = 
red_channel.getpixel((x, y))\n red_binary = bin(red_channel_pixel)\n red_binary = red_binary[:-1] + \"1\"\n # change the last binary value\n encoded_red_pixel = int(red_binary,2)\n # covert binary back to int\n\n else: # if pixel doesnt = white, that means theres no value, set last binary = 0\n red_channel_pixel = red_channel.getpixel((x, y))\n red_binary = bin(red_channel_pixel)\n red_binary = red_binary[:-1] + \"0\"\n encoded_red_pixel = int(red_binary,2)\n\n encoded_rgb = (encoded_red_pixel,\n green_channel.getpixel((x, y)),\n blue_channel.getpixel((x, y)))\n\n encoded_image.putpixel((x, y), encoded_rgb)\n encoded_image.save(\"images/hidden_message_image.png\")", "def encode_images(model_path, images, letterbox_size=224, verbose=False, onlyhor=False, fill=False):\n \n model = load_model(model_path)\n return encode_(\n model=model,\n images=images,\n letterbox_size=letterbox_size,\n verbose=verbose,\n onlyhor=onlyhor,\n fill=fill\n )", "def getimage(self):", "def save_image(self):\n self.save()", "def obimg():\n # The client might make a call to get a pic for an object which might\n # not have one. Better to return a blank than an error in that case.\n imgdat = B64ENCTRANSPARENT4X4PNG\n try:\n dsType = dbacc.reqarg(\"dt\", \"string\", required=True)\n dsId = dbacc.reqarg(\"di\", \"string\", required=True)\n inst = dbacc.cfbk(dsType, \"dsId\", dsId)\n if inst:\n picfldmap = {\"Point\": \"pic\"}\n imgdat = inst[picfldmap[dsType]]\n imgdat = base64.b64decode(imgdat)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respond(imgdat, mimetype=\"image/png\")", "def encode_image(self, image):\n image = self.clip_preprocess(image).unsqueeze(0).to(self.device)\n image_features = self.clip_model.encode_image(image)\n return image_features.cpu().detach().numpy()", "def process(self, image):", "def dump_image(image, path_image):\n cv2.imwrite(path_image, image)\n return", "def write_image(path, image):\n image = tf.image.encode_jpeg(image, quality=100)\n return tf.io.write_file(path, image)", "def encode_image(text_to_encode, template_image=\"images/samoyed.jpg\", output_image=\"images/samoyed.secret.png\"):\n\n image = Image.open(template_image)\n pixels = image.load()\n\n x_size = image.size[0]\n y_size = image.size[1]\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(image, x, y):\n pixels[x,y] = (image.getpixel((x, y))[0] - 1, image.getpixel((x, y))[1], image.getpixel((x, y))[2])\n\n text_image = Image.new(\"RGB\", image.size)\n\n usr_font = ImageFont.truetype(\"ComicNeue.otf\", 25)\n d_usr = ImageDraw.Draw(text_image)\n d_usr = d_usr.text((10,10), text_to_encode, (255,255,255), font=usr_font)\n\n for x in range(x_size):\n for y in range(y_size):\n if lsb_of_red_pixel(text_image, x, y):\n pixels[x,y] = (image.getpixel((x, y))[0] + 1, image.getpixel((x, y))[1], image.getpixel((x, y))[2])\n\n image.save(output_image)", "def archive_image(self, img):\n \n try:\n imgname = \"roboimg\" + str(int(time.time())) + \".png\"\n imgpath = os.path.join(self.imgdir, imgname)\n # print(\"Pic name \" + imgpath)\n\n cv2.imwrite(imgpath, img)\n except:\n self.logger.error(\"archive_image failed %s\" % (imgpath))", "def encode(self, rosMsg):\r\n if not isinstance(rosMsg, sensor_msgs.msg.Image):\r\n raise TypeError('Given object is not a sensor_msgs.msg.Image '\r\n 'instance.')\r\n\r\n # Convert to PIL Image\r\n pil = Image.fromstring(\r\n ImageConverter._ENCODINGMAP_ROS_TO_PY[rosMsg.encoding],\r\n (rosMsg.width, rosMsg.height),\r\n rosMsg.data,\r\n 'raw',\r\n 
ImageConverter._ENCODINGMAP_ROS_TO_PY[rosMsg.encoding],\r\n 0,\r\n 1)\r\n\r\n # Save to StringIO\r\n img = StringIO()\r\n pil.save(img, 'PNG')\r\n return img", "def save_image(img, view, ts, output_dir):\n\n img = tf.image.decode_jpeg(img, channels=3)\n img = Image.fromarray(img.numpy(), 'RGB')\n img.save(os.path.join(output_dir, f'{ts}_{view}.jpeg'))", "def get_body(self):\n from matplotlib.backends.backend_agg import \\\n FigureCanvasAgg as FigureCanvas\n\n canvas = FigureCanvas(self._body)\n png_output = BytesIO()\n canvas.print_png(png_output)\n data = png_output.getvalue()\n\n data_uri = base64.b64encode(data).decode('utf-8')\n return '<img title=\"{}\" src=\"data:image/png;base64,{}\">'.format(\n self.key, data_uri)", "def save_image(image, image_path):\n image = ((image[0] + 1) * 127.5).astype(np.uint8) # convert from [-1, 1] to [0, 255]\n img = Image.fromarray(image)\n img.save(os.path.expanduser(image_path))", "def render_opencv(image, fmt=\"jpg\"):\n if not isinstance(image, np.ndarray):\n return None\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n val, buf = cv2.imencode(\".%s\" % fmt, image)\n return None if not val else buf, \"image/%s\" % fmt", "def encode_png(track_metadata):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to PNG...\")\n\n\t# First step: OptiPNG.\n\tnew_file_name = track_metadata.file_name + \".png\"\n\toptipng_command = [\"optipng\", \"-o7\", \"-strip\", \"all\", \"-snip\", \"-out\", new_file_name, track_metadata.file_name]\n\tprint(optipng_command)\n\tprocess = subprocess.Popen(optipng_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"OptiPNG failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--allfilters-b\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"png\"", "def get_legend_image():\n\n with open('assets/legend.png', 'rb') as img_file:\n encoded_string = base64.b64encode(img_file.read()).decode()\n encoded_image = 'data:image/png;base64,' + encoded_string\n\n return encoded_image", "def output_image(vk4_container, args, data):\n log.debug(\"Entering output_image()\\n\\t Data Layer: {}\".format(args.layer))\n\n not_rgb_list = ['L', 'H']\n out_type = args.type\n layer = args.layer\n\n out_file_name = output_file_name_maker(args) + '.' 
+ out_type\n\n width = vk4_container.image_width\n height = vk4_container.image_height\n if layer in not_rgb_list:\n # data = scale_data(vk4_container, args, data)\n log.debug(\"In output_image()\\n\\tData:\\n{}\".format(data))\n image = Image.fromarray(np.reshape(data, (height, width)), 'F')\n else:\n log.debug(\"In output_image()\\n\\tData:\\n{}\".format(data))\n image = Image.fromarray(np.reshape(data, (height, width, 3)), 'RGB')\n\n image.info = create_file_meta_data(vk4_container, args)\n image.save(out_file_name, args.type.upper())\n\n log.debug(\"Exiting output_image()\")", "def exportImg(self):\n if self.superSampling:\n print(\"Exporting with size adjusted\")\n self.img = self.img.resize((int(self.width/2),int(self.height/2)),Image.NEAREST)\n self.img.save(self.fileName,\"PNG\")", "def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)", "def compress_image(filename,k):", "def gen():\n global dataFrame\n while True:\n frame = vs.read()\n # frame = imutils.resize(frame, width=400)\n \n (flag, encodedImage) = cv2.imencode(\".jpg\", frame.copy())\n if not flag: continue\n # print (encodedImage)\n dataFrame = yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(encodedImage) + b'\\r\\n')", "def img_to_base64(img):\n with io.BytesIO() as output:\n img.save(output, format=\"PNG\")\n img_string = base64.b64encode(output.getvalue())\n return img_string.decode(\"utf-8\")", "def imwrite(image, path):\n\n if image.ndim == 3 and image.shape[2] == 1: # for gray image\n image = np.array(image, copy=True)\n image.shape = image.shape[0:2]\n\n imgarray=((image+1.0)*127.5).astype(np.uint8)\n img=Image.fromarray(imgarray)\n img.save(path)", "def _encode_img(self, file_path):\n import memcache\n filename = file_path.rpartition(os.sep)[2]\n cache_file = \"%s_cache\" % file_path\n cached_image = memcache.get('%s%s' % (memcache.version, cache_file))\n if cached_image is None:\n image = open(file_path)\n cached_image = \"data:image;base64,%s\"%base64.b64encode(image)\n memcache.set('%s%s' % (memcache.version, cache_file), cached_image, 300)\n return cached_image", "def image_to_scratch(im, scratch_image_name):\n\tim.save(scratch_image_name, dpi=(200,200))", "def write_image(self, image_name, image):\n raise NotImplementedError", "def data64(self) -> str:\n return Image.encode64(self.data)", "def save_image(data, file_path):\n with open(file_path, 'wb'):\n prefix = 'data:image/webp;base64,'\n data = data[len(prefix):]\n byte_data = base64.b64decode(data)\n image_data = BytesIO(byte_data)\n img = Image.open(image_data)\n img.save(file_path)\n return True", "def make_image(self, path):\n\t\treturn None", "def jpeg_to_png(img: bytes) -> bytes:\n im = Image.open(BytesIO(img))\n width = 240\n height = int(im.size[1] * (240 / im.size[0]))\n im = im.convert(\"RGB\").resize((width, height))\n stream = BytesIO()\n im.save(stream, format=\"PNG\")\n return stream.getvalue()", "def serialize_image(self, image):\r\n result = {\r\n 'pixels': image.tobytes(),\r\n 'size': image.size,\r\n 'mode': image.mode\r\n }\r\n return result", "def encode(self, img):\n with tf.variable_scope('encoder'):\n #conv1 = self.conv_layer(\n # img, [5, 5], [3, 32], stride=2, initializer_type=1, name='conv1')\n #conv2 = self.conv_layer(\n # conv1, [5, 5], [32, 32], 
stride=2, initializer_type=1, name='conv2')\n conv3 = self.conv_layer(\n img, [5, 5], [self.in_channels, 64], stride=2, initializer_type=1, name='conv3')\n #conv4 = self.conv_bn_layer(\n conv4 = self.conv_layer(\n conv3, [5, 5], [64, 128], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv4')\n #conv5 = self.conv_bn_layer(\n conv5 = self.conv_layer(\n conv4, [5, 5], [128, 256], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv5')\n shape = conv5.get_shape().as_list()\n feature_map_size = shape[1]*shape[2]*shape[3]\n conv5_flat = tf.reshape(\n conv5, [-1, feature_map_size], 'conv5_flat')\n #fc6 = self.fc_bn_layer(conv5_flat, 1024, is_training=self.is_training,\n fc6 = self.fc_layer(conv5_flat, 1024,\n initializer_type=1, name='fc6')\n #fc7 = self.fc_layer(fc6, 1024, initializer_type=1, name='fc7')\n return fc6, shape", "def image(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"image\")", "def encode_images_as_png(images):\n if tf.executing_eagerly():\n for image in images:\n yield tf.image.encode_png(image).numpy()\n else:\n (height, width, channels) = images[0].shape\n with tf.Graph().as_default():\n image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))\n encoded_image_t = tf.image.encode_png(image_t)\n with tf.Session() as sess:\n for image in images:\n enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})\n yield enc_string", "def save_image(self):\r\n filename = filedialog.asksaveasfilename(title='Save Image As...',\r\n filetypes=((\"Portable Network Graphics (.png)\", \"*.png\"), (\"Portable Document Format(.pdf)\", \"*.pdf\")))\r\n self.graph.savefig(filename, dpi=self.graph.dpi)", "def generate():\n global output_frame, lock\n while True:\n with lock:\n if output_frame is None:\n continue\n (flag, encoded_image) = cv2.imencode(\".jpg\", output_frame)\n if not flag:\n continue\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' +\n bytearray(encoded_image) + b'\\r\\n')", "def img_to_ascii(**kwargs):\n ascii_chars = [ u'Z', u'Q', u'T', u'W', u'E', u'K', u'P', u'L', u'I', u'C', u'Y']\n \n width = kwargs.get('width',200)\n path = kwargs.get('path',None)\n\n\n\n im = Image.open(path)\n\n im = resize(im,width)\n\n # w,h = im.size\n\n # this is used as storage. It stores the original picture's color values\n objToGo = list(im.convert(\"RGBA\").getdata())\n\n im = im.convert(\"L\") # convert to grayscale\n\n imlist = list(im.getdata())\n\n i = 0\n j = 0\n # chList is the characters that will be printed. 
It is a 2D array\n chList = []\n chList.append([])\n for val in imlist:\n ch = ascii_chars[val // 25] #.decode('utf-8')\n chList[j].append(ch)\n sys.stdout.write(ch)\n i += 1\n if i % width == 0:\n sys.stdout.write(\"\\n\")\n chList.append([])\n j += 1\n i = 0\n\n return chList,objToGo", "def image_png():\n data = resource(\"images/pig_icon.png\")\n return Response(data, headers={\"Content-Type\": \"image/png\"})", "def convert_img(self):\r\n self.img = self.img.convert('RGB')", "def process_image(self):\n pass", "def image(self):\n # TODO: make sure this method works for png, gif, tiff\n if self.has_metadata:\n self.extract_metadata()\n tempdir_path = self.make_tempdir()\n tempfile_path = os.path.join(tempdir_path, self.filename)\n warnings.simplefilter('error', Image.DecompressionBombWarning)\n try: # Do image conversions\n img_in = Image.open(self.src_path)\n img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n img_out.save(tempfile_path)\n self.src_path = tempfile_path\n except Exception as e: # Catch decompression bombs\n # TODO: change this from all Exceptions to specific DecompressionBombWarning\n self.add_error(e, \"Caught exception (possible decompression bomb?) while translating file {}.\".format(self.src_path))\n self.make_dangerous()\n self.add_file_string('Image file')\n self.set_property('processing_type', 'image')", "def save_image(name, image):\n image_name = 'output/' + name + '.png'\n cv2.imwrite(image_name, image)", "def img(self):\n return self.img_decode(self.img_msg_)", "def convert_to_base64(image_file):\n with open(image_file, 'rb') as f:\n jpeg_bytes = base64.b64encode(f.read()).decode('utf-8')\n predict_request = '{\"instances\" : [{\"b64\": \"%s\"}]}' % jpeg_bytes\n # Write JSON to file\n with open(OUTPUT_FILE, 'w') as f:\n f.write(predict_request)\n return predict_request", "def save_image_action(self):\n self.view.save_image(self.settings.get_image_type())", "def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()", "def __writeImageBytes(self, image):\n\n if not image:\n raise Exception(\"image not found\")\n result = []\n for i, b in enumerate(image):\n if i % 39 == 0:\n result.append(\"\\n\")\n result.append(f\"{b:02X}\")\n return \"\".join(result)", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def _repr_png_(self):\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoimg(\n mol, size, self.aix, \"\", returnPNG=True, drawOptions=opts,\n kekulize=keku, highlightBonds=self.bix\n )", "def addImg(in_dict):\n img = Image(name=in_dict[\"name\"],\n b64str=in_dict[\"b64str\"],\n imgsize=in_dict[\"imgsize\"],\n processed=in_dict[\"processed\"],\n timestamp=in_dict[\"timestamp\"])\n ans = img.save()\n return ans.name", "def getbase64(nparr,):\n if type(nparr) == type({}):\n nparr = nparr['img']\n im = Image.fromarray(nparr)\n buf = BytesIO()\n im.save(buf,format=\"JPEG\")\n return base64.b64encode(buf.getvalue()).decode('ascii')", "def writeImage(image, filename):\n Sky = [128,128,128]\n Building 
= [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)", "def to_internal_value(self, data):\n if isinstance(data, str) and data.startswith('data:image'):\n # Found image is encoded, and must be decoded\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1] # Extract file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' + ext)\n return super(Base64ImageField, self).to_internal_value(data)", "def save_image(self):\n self.table_to_image.img.save(self.file_name)\n aws.AWSHandler().upload_image(self.file_name)", "def convert():\n if request.method == \"POST\":\n\n # upload an image from the user\n image_info = uploadImage(request)\n\n if not image_info:\n flash(\"No Image Selected.\")\n return redirect(url_for(\"convert\"))\n\n image_filepath = image_info.get(\"filepath\")\n image_extension = image_info.get(\"extension\")\n\n char_key = request.form.get(\"charkey\")\n\n image_width = int(request.form.get(\"image-width\"))\n image_height = request.form.get(\"image-height\")\n\n if char_key == \"\" or char_key == None:\n char_key = [\".\", \",\", \"*\", \"/\", \"(\", \"#\", \"%\", \"&\", \"@\"]\n else:\n char_key = char_key.split(\" \")\n\n # create the ascii art from the image\n if image_extension == \"gif\":\n gif_ascii_data = make_gif_ascii_string(\n image_filepath,\n char_key,\n image_width,\n image_height,\n )\n\n ascii_art = gif_ascii_data.get(\"previewImage\")\n ascii_art_data = gif_ascii_data.get(\"frames\")\n else:\n ascii_art = make_image_ascii_string(\n image_filepath,\n char_key,\n image_width,\n image_height,\n )\n ascii_art_data = ascii_art\n\n # remove the image\n os.remove(image_filepath)\n\n # the ascii image string\n context = {\n \"asciiImage\": ascii_art,\n \"imageData\": json.dumps(ascii_art_data),\n \"isAnimated\": json.dumps(image_extension == \"gif\"),\n }\n\n return render_template(\"view.html\", **context)\n else:\n context = {\n \"uploadSize\": int(current_app.config[\"MAX_CONTENT_LENGTH\"] / 1024 / 1024),\n \"allowedExtensions\": current_app.config[\"ALLOWED_EXTENSIONS\"],\n }\n\n return render_template(\"convert.html\", **context)", "def save_img(img: np.ndarray, path: str) -> None:\n\n img_obj = Image.fromarray(img)\n img_obj.save(path)", "def save(self, path: str) -> None:\n if self._encoded_image:\n path = self._path_as_png(path)\n self._encoded_image.save(path)\n else:\n print(\"Error! 
Image was not encoded yet.\")", "def get_image_uri(self):\n return \"data:image/png;base64,\" + \\\n self.browser.get_screenshot_as_base64()", "def imgCopy(img):\n return sitk.Image(img)", "def decode(self, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def image_to_base64(pixbuf, activity):\n _file_name = os.path.join(get_path(activity, 'instance'), 'imagetmp.png')\n if pixbuf != None:\n pixbuf.save(_file_name, \"png\")\n _base64 = os.path.join(get_path(activity, 'instance'), 'base64tmp')\n _cmd = \"base64 <\" + _file_name + \" >\" + _base64\n subprocess.check_call(_cmd, shell=True)\n _file_handle = open(_base64, 'r')\n _data = _file_handle.read()\n _file_handle.close()\n return _data", "def encode(self) :\n\t\tbitmap = ISO8583Bitmap()\n\t\ttexts=[]\n\t\tfor i in range(2,129) :\n\t\t\tid = 'f%03d' % i\n\t\t\tif hasattr(self,id) :\n\t\t\t\tv = getattr(self,id)\n\t\t\t\ttyp = self.desc_dict[id]['type']\n\t\t\t\tbitmap.setBitmap(i)\n\t\t\t\t# logit(\"%s:%s\" % (id,v))\n\t\t\t\ttxt = dataAttachTo8583(v,typ)\n\t\t\t\ttexts.append(txt)\n\t\treturn (bitmap,''.join(texts))", "def Image(self, *args):\n return _BRepAlgo.BRepAlgo_Image_Image(self, *args)", "def data64(self, value: str) -> None:\n self.data = Image.decode64(value)", "def save(image):\n keypoints, description = describe(image)\n artwork = {\n \"keypoints\": keypoints,\n \"description\": description,\n \"path\": image,\n \"date\": datetime.datetime.utcnow()\n }\n artwork_id = db.insert(artwork)\n print(artwork_id)", "def save_image(image, output_folder, output_name):\n\n\tfolder_path = compute_path(output_folder, 'dataset')\n\tos.makedirs(folder_path, exist_ok=True)\n\n\tfile_path = os.path.join(folder_path, output_name + '.png')\n\timage.save(file_path)", "def display_image(image):\n image = tf.constant(image)\n image = tf.image.convert_image_dtype(image, tf.uint8)\n return PIL.Image.fromarray(image.numpy())" ]
[ "0.7395458", "0.71863705", "0.6997544", "0.69900215", "0.6834343", "0.67835957", "0.67522126", "0.6698545", "0.6664718", "0.6626647", "0.65300465", "0.643985", "0.64295864", "0.6397522", "0.63932425", "0.6282071", "0.62708914", "0.6267106", "0.625478", "0.62303776", "0.6178282", "0.616587", "0.61610234", "0.6103472", "0.6092855", "0.6066733", "0.60390556", "0.59830356", "0.59707147", "0.59332687", "0.5903442", "0.5889382", "0.58893156", "0.5871773", "0.58568835", "0.5843647", "0.5837045", "0.58225346", "0.5808718", "0.5795832", "0.5791341", "0.5788632", "0.57641745", "0.575779", "0.5727674", "0.57260877", "0.5711786", "0.5704416", "0.5697822", "0.5691467", "0.56840885", "0.56772107", "0.5676635", "0.5671399", "0.5668276", "0.5663793", "0.5657399", "0.5650698", "0.5649104", "0.5647171", "0.56468123", "0.56341356", "0.5626775", "0.562448", "0.561107", "0.56033516", "0.5597483", "0.559612", "0.5589982", "0.5589643", "0.55836236", "0.55778337", "0.55745566", "0.5564816", "0.5563254", "0.55626905", "0.55621946", "0.5560884", "0.55487895", "0.55470985", "0.5544974", "0.5544974", "0.55324763", "0.55243343", "0.55234313", "0.5521347", "0.5520428", "0.5512534", "0.549549", "0.54880995", "0.5482534", "0.5472381", "0.54697853", "0.54683024", "0.54676974", "0.54630286", "0.5462546", "0.54591525", "0.5453391", "0.5440554" ]
0.62895167
15
assert json schema for requests from api.openweathermap.org
def validate_schema_openweathermap(self, actual, schema):
    resources_dir = os.path.abspath(os.getcwd())
    relative_schema_path = valid_json_schema if schema == 'Valid' else error_json_schema
    schema_data = open(os.path.join(resources_dir, relative_schema_path))
    self.validate_schema(actual, json.load(schema_data))
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('title', data)\n self.assertIn('release_year', data)\n self.assertIn('casting', data)\n self.assertIn('directors', data)\n self.assertIn('producers', data)\n self.assertIn('roman_release_year', data)", "def test_trucks_api(self):\n resp = self.app.get('/trucks')\n self.assertEqual(resp.status_code, 200)\n\n # ensure proper JSON is returned\n data = json.loads(resp.data)\n assert 'resp' in data\n for item in data['resp']:\n # address is not actually required\n assert 'name' in item\n assert 'fooditems' in item\n assert 'latitude' in item\n assert 'longitude' in item\n assert 'schedule' in item", "def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)", "def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('first_name', data)\n self.assertIn('last_name', data)\n self.assertIn('aliases', data)\n self.assertIn('movies_as_actor', data)\n self.assertIn('movies_as_director', data)\n self.assertIn('movies_as_producer', data)", "def test_parse_weather_weather_simple_json(self):\n\n # Parse the data.\n actual = timeseries.parse_weather(self.weather_simple)\n\n # Ensure actual and expected results are equal.\n pd.testing.assert_frame_equal(actual, self.weather_simple_expected)", "def testSchemaLoadingAsString(self):\n api = self.ApiFromDiscoveryDoc('latitude.v1.json')\n self.assertEquals(4, len(api._schemas))", "def test_api_schema(self):\n response = self.client.get(\"/api/schema/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.get(\"Content-Type\"), \"application/vnd.oai.openapi; charset=utf-8\"\n )\n self.assertEqual(\n response.get(\"Content-Disposition\"), 'inline; filename=\"Marsha API.yaml\"'\n )", "def validate_json(self):\n pass", "def test_meta_data_okay(self):\n self.expect_json_http({\"some\": \"value\"},\n uri=re.compile(\".*/articles/1234-56\"))\n\n self.assertEqual({\"some\": \"value\"},\n federalregister.meta_data(\"1234-56\"))", "def test_simple2(self):\n api = self.load_api_description('simple2.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n 
self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)", "def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")", "def test_simple4(self):\n api = self.load_api_description('simple4.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n self.assertEqual(len(output.headers), 1)\n header = output.headers[0]\n self.assertEqual(header.name, 'Location')\n self.assertEqual(header.type.type.get_reference_name(), 'href')\n self.assertEqual(header.type.ref, 'Order')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)\n\n resource = api.resources[0]\n self.assertEqual(len(resource.input_bindings), 1)\n self.assertEqual(resource.input_bindings[0].id, 'orderIdBinding')\n self.assertEqual(len(resource.operations), 2)\n self.assertEqual(resource.operations[0].input.params[0].binding, 'orderIdBinding')\n self.assertEqual(resource.operations[1].input.params[0].binding, 'orderIdBinding')", "def check_schema(self, response):\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result", "def test_no_input(self):\n resp = SearchTest.client.get('/api/search/')\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. 
Please check the API documentation for the appropriate input format!!\",\"No Input Test Error\")", "def decide_schema(self, json_data):\n pass", "def decide_schema(self, json_data):\n pass", "def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)", "def test__import_api_v7(self):\n response = textwrap.dedent(\n \"\"\"\\\n const apiSchema = [\n {\n \"info\" : {\n }\n }\n ]\n ;\n \"\"\"\n )\n self._test__import_api(response)", "def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict", "def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def test_validation_get_order_schema(self):\n self.assertIsInstance(api.validation.fetch_order_schema(), dict)", "def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)", "def test_api_hackernews_post_topstories_comments_no_json_data(client):\n response = client.post(\n \"/api/hackernews/topstories/123456789/comments\",\n )\n response = json.loads(response.data)\n assert (\n {\n '_schema': ['Invalid input type.'],\n\n }\n ) == response", "def test_simple3(self):\n api = self.load_api_description('simple3.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 2)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n self.assertEqual(api.base[1], 'https://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 4)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)\n\n operation = api.resources[0].operations[0]\n self.assertEqual(len(operation.errors), 2)", "def 
test_minimum_args(self) -> None:\n schema = JSONSchema()\n self.assertIsInstance(schema.schema, str)\n self.assertIsNone(schema.title)\n self.assertIsNone(schema.description)", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def test_json() -> Response:\n response = requests.get('https://httpbin.org/json')\n return jsonify(response.json())", "def test_get_json_spec(self):\n pass", "def test_input_schema(self, data, errors):\n resp = self.client.post(self.url, json=data)\n\n if not errors:\n assert resp.status_code == 200\n assert resp.get_json() == {\n 'status': 'OK',\n 'message': 'Data published via Upload service',\n }\n else:\n assert resp.status_code == 400\n assert resp.get_json() == {\n 'status': 'Error',\n 'message': 'Input payload validation failed',\n 'errors': {\n k: ['Missing data for required field.'] for k in errors\n },\n }", "def test_openapi_schema(app, client):\n response = client.get(\"/swagger/\")\n assert response.status_code == 200\n assert len(json.loads(response.data)[\"paths\"]) > 0", "def test_get(self):\n response = self.client.get('/weather/', format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_json_direct(self): \n response = client.result(True, 'json', 'unittest', test_data = self.test_data)\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')", "def test_simple1(self):\n api = self.load_api_description('simple1.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 1)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(string)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')", "def test_generating(resp):\n errors = []\n if not check_int(resp[\"tightness\"]):\n errors.append(\"Invalid type for Itinerary response's 'tightness' field.\")\n\n if not isinstance(resp, bool):\n errors.append(\"Invalid type for Itinerary response's 'start_from_airport' field.\")", "def test_trucks_api_empty_food(self):\n resp = self.app.get('/trucks?bounds=37.74552131083975,-122.45653323673707,37.74552131083975,-122.45653323673707')\n self.assertEqual(resp.status_code, 200)\n\n expected = '{ \"resp\": [] }'\n self.assertEqual(expected.split(), resp.data.split())", "def test_list(self):\n response = self.client.get('/weather-observations/', format=\"json\")\n self.assertEqual(response.status_code, 200)", "def test_bad_filter_json_format(admin_client, public_resource_with_metadata):\n query_filter = {'malformed': 'json'}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(query_filter), 
follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n assert djangoresponse.status_code == 400\n assert \"Filter JSON parsing error\" in response['message']", "def test_api_response_data(self):", "def test_simple6(self):\n api = self.load_api_description('simple6.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(len(input.type.type.fields), 2)\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n nested = api.data_types[1]\n field = nested.fields[1]\n self.assertEqual(len(field.type.type.fields), 1)\n\n resource = api.resources[0]\n self.assertEqual(len(resource.input_bindings), 1)\n self.assertEqual(resource.input_bindings[0].id, 'orderIdBinding')\n self.assertEqual(len(resource.operations), 2)\n binding = resource.operations[0].input.params[0].binding\n self.assertEqual(binding.mode, 'url')\n self.assertEqual(binding.name, 'orderId')\n self.assertEqual(binding.type.type.get_reference_name(), 'string')\n self.assertEqual(resource.operations[1].input.params[0].binding, 'orderIdBinding')", "def test_get_city_notfound(client):\n response = client.get(\"/weather/curitoba\")\n # Validate the response\n print(response.data)\n assert b\"200\" not in response.data", "def test_missing_data(self):\n\n response = self.client.post(\n self.reg_url,\n {},\n format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"required\", response.content)", "def validate_api_resp(actual_json_resp, json_schema_path: str, json_schema_file_name):\n with open(os.path.join(JSON_SCHEMA_ROOT, json_schema_path, json_schema_file_name), 'r') as f:\n json_schema = json.loads(f.read())\n actual_json = json.loads(str(actual_json_resp.data, 'utf-8'))\n jsonschema.validate(actual_json, json_schema)", "def test_required_city_missing(self):\r\n self.url_params['city'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'A city is required',\r\n )", "def test_trucks_api_empty_food(self):\n resp = self.app.get('/trucks?food=asfdasdf')\n self.assertEqual(resp.status_code, 200)\n\n expected = '{ \"resp\": [] }'\n self.assertEqual(expected.split(), resp.data.split())", "def check_city_response(self, response, city_name):\n body = json_decode(response.body)\n 
self.assertEqual(type(body), dict)\n self.assertIsNotNone(body.get('city_name'))\n self.assertIsNotNone(body.get('current_temperature'))\n self.assertIsNotNone(body.get('current_weather_description'))\n self.assertIsNotNone(body.get('population'))\n self.assertIsNotNone(body.get('bars'))\n self.assertIsNotNone(body.get('city_score'))\n\n self.assertEqual(body.get('city_name'), city_name)\n\n self.assertIsInstance(body.get('current_temperature'), numbers.Number, \"The current temperature is not numeric\")\n self.assertIsInstance(body.get('current_weather_description'), str, \"The weather description is not a string\")\n self.assertIsInstance(body.get('population'), int, \"The population is not an integer\")\n self.assertIsInstance(body.get('bars'), int, \"The number of bars is not an integer\")\n self.assertIsInstance(body.get('city_score'), numbers.Number, \"The city score is not a number\")", "def test_get_home(client):\n response = client.get(\"/weather/\")\n # Validate the response\n print(response.data)\n assert b\"Weather\" in response.data", "def test_build_schema_badschema(self):\n dummy_meta = {\n 'schema': '',\n 'version': '1.0.0',\n 'update': datetime.datetime.utcnow().isoformat(),\n }\n\n with pytest.raises(jsonschema.exceptions.ValidationError):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n dummy_meta,\n schema_utils.Update.first_run\n )", "def test_json(self):\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)\n schema1 = GraphQlSchemaFactory.create_from_modules([\n 'graphql.executor.test.star_wars',\n 'graphql.executor.test.star_wars_extra',\n 'graphql.scalar_descriptors.strict'])\n schema2 = GraphQlSchema.create_from_json(schema1.to_json())\n self._assert_schemas_equal(schema1, schema2)", "def look_for_other_attributes(context):\n json_data = context.response.json()\n assert \"recommended_versions\" in json_data, \"No recommended version found\"\n assert \"registration_link\" in json_data, \"No snyk registration link found\"\n assert \"component_analyses\" in json_data, \"No component analyses data found\"\n assert \"message\" in json_data, \"No message found\"\n assert \"severity\" in json_data, \"No severity found\"\n assert \"known_security_vulnerability_count\" in json_data\n assert \"security_advisory_count\" in json_data", "def test_search_validator_good_data():\n sval = helpers.search_validator()\n good = '{\"fields\": {\"country\": \"DK\", \"plate\": \"BC69432\"}}'\n assert sval.validate(loads(good))", "def get_json(schema):\n\n data = request.get_json(force=True, silent=True, cache=False)\n\n message = schema.validate(data)\n\n if message:\n raise BadRequest(message)\n\n return data", "def test_required_city(self):\r\n self.url_params['city'] = 'New York'\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertTrue(obj['success'])", "def _check_fields(self, content: JsonDict) -> None:\n self.assertIn(\"id\", content)\n self.assertIn(\"received_ts\", content)\n self.assertIn(\"room_id\", content)\n self.assertIn(\"event_id\", content)\n self.assertIn(\"user_id\", content)\n self.assertIn(\"sender\", content)\n self.assertIn(\"canonical_alias\", content)\n self.assertIn(\"name\", content)\n self.assertIn(\"event_json\", content)\n self.assertIn(\"score\", 
content)\n self.assertIn(\"reason\", content)\n self.assertIn(\"auth_events\", content[\"event_json\"])\n self.assertIn(\"type\", content[\"event_json\"])\n self.assertIn(\"room_id\", content[\"event_json\"])\n self.assertIn(\"sender\", content[\"event_json\"])\n self.assertIn(\"content\", content[\"event_json\"])", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def test_api() -> bool:\r\n weather = False\r\n news = False\r\n covid = False\r\n if check_weather_version():\r\n logging.info(\"Weather API version is up to date (check_weather_version())\")\r\n weather = True\r\n else:\r\n logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED\")\r\n if check_news_version():\r\n logging.info(\"News API version is up to date (check_news_version())\")\r\n news = True\r\n else:\r\n logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\")\r\n if check_covid_version():\r\n logging.info(\"Covid-19 API version is up to date (check_covid_version())\")\r\n covid = True\r\n else:\r\n logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\")\r\n return bool(weather and news and covid)", "def assertValidJSONResponse(self, resp):\r\n self.assertHttpOK(resp)\r\n self.assertTrue(resp['Content-Type'].startswith('application/json'))\r\n self.assertValidJSON(resp.content)", "def test_word_info_bad_request(self):\n word = \"defination of vitality \"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n \"code\": 400,\n \"message\": \"A Term must be only a single word\"\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 400)\n self.assertEquals(response_data[\"code\"], expected_output[\"code\"])\n self.assertEquals(response_data[\"message\"], expected_output[\"message\"])", "def test_api_sensor(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load sensors from url specified in api base\n r = requests.get(r['sensors']).json()\n r = requests.get(r['sensors'][0]['url']).json()\n self.assertIn('description', r)\n self.assertIn('started', r)\n self.assertIn('maximum', r)\n self.assertIn('recent_sample', r)\n self.assertIn('id', r)\n self.assertIn('type', r)\n self.assertIn('url', r)\n self.assertIn('minimum', r)\n self.assertIn('ended', r)", "def test_response_json(self):\n response = self.client.search()\n self.assertTrue(isinstance(response.json, dict))\n\n\n # with invalid json\n from rubber import settings\n settings.RUBBER_MOCK_HTTP_RESPONSE = \"\"\";;;\"\"\"\n \n response = self.client.search()\n self.assertIsNone(response.json)", "def test_comments_structure(self):\n for comment in self.resp_json:\n assert type(comment) == dict", "def check_token_structure(data):\n assert \"token\" in data\n token_structure = data[\"token\"]\n\n assert \"access_token\" in token_structure\n assert \"token_type\" in token_structure\n assert \"expires_in\" in token_structure", "def weather_test():\r\n with open('config.json', 'r') as conf:\r\n conf = json.load(conf)\r\n # Gets the API key from the config.json file\r\n weather_api_key = conf[\"weather_api_key\"]\r\n weather_city_name = conf['weather_city_name']\r\n response = requests.get(\r\n 
'http://api.openweathermap.org/data/2.5/weather?'\r\n 'q=' +\r\n weather_city_name +\r\n '&units=metric&appid=' +\r\n weather_api_key)\r\n response_code = response.status_code\r\n return response_code", "def test_api_hackernews_post_topstories_comments_invalid_story_id(client):\n response = client.post(\n \"/api/hackernews/topstories/123456789/comments\",\n )\n response = json.loads(response.data)\n assert (\n {\n '_schema': ['Invalid input type.'],\n\n }\n ) == response", "def validate_request(req):\n mandatory_fields = conf[\"api\"][\"mandatory-fields\"]\n optional_fields = conf[\"api\"][\"optional-fields\"]\n\n if not req.content_length:\n return {\"invalid\": \"no data\"}\n\n data = req.get_json()\n\n for field in mandatory_fields:\n if field not in data:\n data[\"invalid\"] = f\"`{field}` must be supplied\"\n return data\n\n invalid = globals()[f\"invalid_{field}\"](data[field])\n if invalid:\n data[\"invalid\"] = invalid\n return data\n\n for field, default in optional_fields.items():\n try:\n invalid = globals()[f\"invalid_{field.replace('-', '_')}\"](data[field])\n if invalid:\n data[\"invalid\"] = invalid\n return data\n\n except KeyError:\n data[field] = default\n\n return data", "def test_invalid_data_types(self):\n response=self.check_invalid_data_type()\n result = json.loads(response.data.decode('utf-8'))\n self.assertEqual(result['Error'],\"Require int or float type\")\n self.assertEqual(response.status_code, 200)", "def test_computeViewReturnsExpectedJsonFormat(self):\n response = self.client.get('/api/v1/compute/25544/?time=20170825200000')\n content = response.content.decode('utf8')\n\n expected_keys = [\n 'longitude',\n 'latitude',\n 'elevation',\n 'velocity',\n 'tle',\n ]\n\n json_data = json.loads(content)\n json_keys = [key for key in crawl_json(json_data)]\n\n\n for key in expected_keys:\n self.assertTrue(\n key in json_keys,\n \"there is no key '{}' in the json\".format(key)\n )", "def get_json(schema):\n\n data = request.get_json(force=True, silent=True, cache=False)\n\n error = schema.validate(data) or None\n\n if error:\n data = None\n error = {\n 'code': 'invalid_payload',\n 'message': f'Invalid JSON payload received. 
{json.dumps(error)}.',\n }\n\n return data, error", "def test_invalid_request(self):\n response = self.client.post(telemetry_url)\n self.assertEqual(400, response.status_code)\n\n response = self.client.post(telemetry_url, {\n 'longitude': 0,\n 'altitude_msl': 0,\n 'uas_heading': 0,\n })\n self.assertEqual(400, response.status_code)\n\n response = self.client.post(telemetry_url, {\n 'latitude': 0,\n 'altitude_msl': 0,\n 'uas_heading': 0,\n })\n self.assertEqual(400, response.status_code)\n\n response = self.client.post(telemetry_url, {\n 'latitude': 0,\n 'longitude': 0,\n 'uas_heading': 0,\n })\n self.assertEqual(400, response.status_code)\n\n response = self.client.post(telemetry_url, {\n 'latitude': 0,\n 'longitude': 0,\n 'altitude_msl': 0,\n })\n self.assertEqual(400, response.status_code)", "def test_validate_business_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'lastLedgerTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'lastPreBobFilingTimestamp': '2019-04-15T20:05:49.068272+00:00',\n 'legalName': 'legal name - CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid", "def get(self, city: str):\n # Make a call to the OpenWeatherMap API and check the units inserted at the query parameter.\n units = request.args.get('unit', '').casefold()\n weather_data, query_units = self.get_weather(city, units)\n temp = self.check_unit(query_units)\n\n # Get the date from the request if no date is provided use the current date and time.\n date_raw = request.args.get('at')\n self.timezone = datetime.now().astimezone().tzinfo\n\n if date_raw:\n # Two date formats are allow an aware and naive date. If no time info has been given use the current time.\n try:\n date = isoparse(date_raw.replace(' ', '+'))\n except ValueError:\n now = datetime.now()\n date = datetime.strptime(date_raw, '%Y-%m-%d').replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond,\n tzinfo=self.timezone\n )\n else:\n now = datetime.now()\n date = datetime.now().replace(\n hour=now.hour, minute=now.minute, second=now.second, microsecond=now.microsecond, tzinfo=self.timezone\n )\n\n # Prepare the error response.\n self.error = {\n 'error': '',\n 'error_code': ''\n }\n\n if self.check_past_date(date):\n return self.error, 400\n\n if type(weather_data) == dict:\n # Based on the date check the index of the weather that corresponds with the date in the weather response.\n index = self.find_index(weather_data, date)\n weather_dict = {\n f'{weather_data[\"list\"][index][\"weather\"][0][\"main\"].lower()}':\n f'{weather_data[\"list\"][index][\"weather\"][0][\"description\"]}',\n 'humidity': f'{weather_data[\"list\"][index][\"main\"][\"humidity\"]}%',\n 'pressure': f'{weather_data[\"list\"][index][\"main\"][\"pressure\"]} hPa',\n 'temperature': f'{str(weather_data[\"list\"][index][\"main\"][\"temp\"]) + temp}',\n }\n return weather_dict, 200\n\n elif '404' in str(weather_data):\n self.error['error'] = f'cannot find the city\"{city}\"'\n self.error['error_code'] = 'city_not_found'\n return self.error, 404\n\n else:\n self.error['error'] = 'Something went wrong'\n self.error['error_code'] = 'internal_server_error'\n return self.error, 500", "def handle_marshmallow_validaton(err): # except ValidationError as err\n return jsonify(err.messages), 400 # bad request", "def test_unknown_by_content_type(self):\n request = FakeRequest('/hiihoo.json', 'hiihootype')\n response = 
datamapper.format(request, {'a': 1})\n self.assertEquals(json.loads(response.content), {'a': 1})\n self.assertEquals(response['Content-Type'], 'application/json; charset=utf-8')", "def test_pollination08(self):\n data = u'{\"farm\": {\"type\":100} }'\n result = self.init_test_app().post(\n '/pollination',\n data=data,\n content_type='application/json',\n headers={'accept': 'application/json'})\n self.assertEqual(result.status_code, 400)", "def test_invalid_json():\n\n with pytest.raises(ExxRequestException):\n with requests_mock.mock() as m:\n m.get('https://api.exx.com/data/v1/markets', text='<head></html>')\n client.get_markets()", "def test_valid_data():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Learner\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n \"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n \"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": \"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": 
null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 1\n assert response.json[0][\"error\"][\"code\"] == 0", "def get_valid_json_or_abort(schema):\n\n json_request = flask.request.get_json(force=True)\n\n try:\n jsonschema.validate(json_request, schema)\n except jsonschema.ValidationError as e:\n flask_restful.abort(400, message=e.message)\n else:\n return json_request", "def test_get_business(client):\n rv = client.get('/api/v1/businesses/CP/CP0001965')\n\n assert 200 == rv.status_code\n is_valid, errors = validate(rv.json, 'business', validate_schema=True)\n if errors:\n for err in errors:\n print('\\nERROR MESSAGE:')\n print(err.message)\n\n assert is_valid", "def validate_insert_json(request_json):\n try:\n jsonschema.validate(request_json, schema_input)\n except (jsonschema.exceptions.ValidationError, jsonschema.exceptions.SchemaError, JSONDecodeError) as e:\n current_app.logger.info(\"Invalid json:{}\".format(str(e)))\n raise (InvalidJSONError(\"Invalid json:{}\".format(str(e))))", "def test_init(self):\n self.assertIsInstance(self.obj, Place)\n self.assertEqual(self.obj.id, self.test_dict[\"id\"])\n self.assertEqual(self.obj.created_at,\n strptime(self.test_dict[\"created_at\"],\n '%Y-%m-%dT%H:%M:%S.%f'))\n self.assertEqual(self.obj.updated_at,\n strptime(self.test_dict[\"updated_at\"],\n '%Y-%m-%dT%H:%M:%S.%f'))\n self.assertEqual(self.obj.created_at, self.obj.updated_at)\n self.assertEqual(self.obj.latitude, 0.0)\n self.assertEqual(self.obj.longitude, 0.0)\n self.assertEqual(self.obj.city_id, \"\")\n self.assertEqual(self.obj.user_id, \"\")\n self.assertEqual(self.obj.name, \"\")\n self.assertEqual(self.obj.description, \"\")\n self.assertEqual(self.obj.number_rooms, 0)\n self.assertEqual(self.obj.number_bathrooms, 0)\n self.assertEqual(self.obj.price_by_night, 0)\n self.assertEqual(self.obj.max_guest, 0)\n self.assertEqual(self.obj.amenity_ids, [])\n self.assertIsInstance(self.obj.created_at, datetime.datetime)\n self.assertIsInstance(self.obj.updated_at, datetime.datetime)\n self.assertIsInstance(self.obj.id, str)\n self.assertIsInstance(self.obj.latitude, float)\n self.assertIsInstance(self.obj.longitude, float)\n self.assertIsInstance(self.obj.city_id, str)\n self.assertIsInstance(self.obj.user_id, str)\n self.assertIsInstance(self.obj.name, str)\n self.assertIsInstance(self.obj.description, str)\n self.assertIsInstance(self.obj.number_rooms, int)\n self.assertIsInstance(self.obj.number_bathrooms, int)\n self.assertIsInstance(self.obj.price_by_night, int)\n self.assertIsInstance(self.obj.max_guest, int)\n self.assertIsInstance(self.obj.amenity_ids, list)\n\n 'This is test for else statment'\n self.obj = Place()\n\n self.assertIsInstance(self.obj, Place)\n self.assertNotEqual(self.obj.id, \"\")\n self.assertEqual(self.obj.created_at, self.obj.updated_at)\n self.assertEqual(self.obj.latitude, 0.0)\n self.assertEqual(self.obj.longitude, 0.0)\n self.assertEqual(self.obj.city_id, \"\")\n self.assertEqual(self.obj.user_id, \"\")\n self.assertEqual(self.obj.name, \"\")\n self.assertEqual(self.obj.description, \"\")\n self.assertEqual(self.obj.number_rooms, 0)\n self.assertEqual(self.obj.number_bathrooms, 0)\n self.assertEqual(self.obj.price_by_night, 0)\n self.assertEqual(self.obj.max_guest, 0)\n self.assertEqual(self.obj.amenity_ids, [])\n self.assertIsInstance(self.obj.created_at, datetime.datetime)\n self.assertIsInstance(self.obj.updated_at, datetime.datetime)\n 
self.assertIsInstance(self.obj.created_at, datetime.datetime)\n self.assertIsInstance(self.obj.updated_at, datetime.datetime)\n self.assertIs(self.obj, storage.objects[type(self.obj).__name__ + \".\" +\n str(self.obj.id)])\n self.assertIsInstance(self.obj.id, str)\n self.assertIsInstance(self.obj.id, str)\n self.assertIsInstance(self.obj.latitude, float)\n self.assertIsInstance(self.obj.longitude, float)\n self.assertIsInstance(self.obj.city_id, str)\n self.assertIsInstance(self.obj.user_id, str)\n self.assertIsInstance(self.obj.name, str)\n self.assertIsInstance(self.obj.description, str)\n self.assertIsInstance(self.obj.number_rooms, int)\n self.assertIsInstance(self.obj.number_bathrooms, int)\n self.assertIsInstance(self.obj.price_by_night, int)\n self.assertIsInstance(self.obj.max_guest, int)\n self.assertIsInstance(self.obj.amenity_ids, list)", "def get_weather_data() -> dict:\n # Creating the url for the api call\n api_key = \"96bba64ba34672da132c1a987ad2fee6\"\n lat = 49.24\n long = -123.15\n config = '&units=metric'\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={api_key}{config}'\n\n # Querying and JSON parsing\n api_return = requests.get(url)\n weather_data = api_return.json()\n return weather_data", "def test_lti20_good_json(self):\r\n for json_str, expected_comment in self.GOOD_JSON_INPUTS:\r\n score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)\r\n self.assertEqual(score, 0.1)\r\n self.assertEqual(comment, expected_comment)", "def test_postStatsRequest(self):\n api = PlanetApi(config.Planet_Api_Key)\n res = api.postStatsRequests(self.search_request)\n #Response was returned successfully\n self.assertEqual(res.status_code, 200)\n #Response had the expected structure\n r = res.json()\n self.assertTrue(r['interval'])\n self.assertTrue((r['buckets']))\n self.assertTrue(r['buckets'][0]['count'])\n self.assertTrue(r['buckets'][0]['start_time'])", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_validate_json(self):\n # Lifted directly from the python-jsonschema docs\n test_schema = {\"type\": \"object\",\n \"properties\": {\n \"price\": {\"type\": \"number\"},\n \"name\": {\"type\": \"string\"},\n }}\n valid = {\"name\": \"Eggs\", \"price\": 34.99}\n invalid = {\"name\": \"Eggs\", \"price\": \"Invalid\"}\n\n test_model = RecordSchema(schema=test_schema)\n\n self.assertIsNone(test_model.validate_json(valid))\n\n with self.assertRaises(jsonschema.exceptions.ValidationError):\n test_model.validate_json(invalid)", "def test_post(self):\n payload = {\n \"layers\": \"wwtp_capacity_nuts3\",\n \"year\": \"2012\",\n \"schema\": \"public\",\n \"nuts\": [\"FR102\"]\n }\n\n expected_status = 200\n\n output = requests.post(url, json=payload)\n\n assert output.status_code == expected_status", "def test_read_json_schema():\n json_schema = os.path.join(TEST_DATA_PATH, 'example_schema.json')\n schema_tree = schema.load_schema(json_schema, resolve_references=True)\n schema.check_schema(schema_tree)", "def test_positive_get_auth_horizon_check_values(self):\n r = self.res.get('/auth/config/'+(utils.partner), headers=utils.headers)\n logging.info(\"Return response is 
'%s'\" % r)\n # convert string to dictionary\n rd = ast.literal_eval(r)\n logging.info(\"Return response in dictionary format is '%s'\" % rd)\n self.assertEqual(self.res.response.status, 200)\n types = ['mozy', 'cbeyond', 'horizon']\n # assert 'type' is in the 3 values\n self.assertTrue(rd['type'] in types, \"The 'type' is not in '%s'\" % types)\n # assert 'web_endpoint' is the url format\n p = re.compile(\"(https:\\/\\/)*[\\w\\-_]+(\\.[\\w\\-_]+)+([\\w\\-\\.,@?^=%&amp;:/~\\+#]*[\\w\\-\\@?^=%&amp;/~\\+#])?\")\n self.assertTrue(p.match(rd['web_endpoint']), \"The 'web_endpoint' does not match URL format\")\n # assert 'client_endpoint' is the url format\n self.assertTrue(p.match(rd['client_endpoint']), \"The 'client_endpoint' does not match URL format\")\n # assert 'horizon_org_name' is at least a string type\n self.assertEqual(type(rd['org_name']), type(str(\"\")), \"The 'org_name' is not string type\")", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def test_detail_format(self) -> None:\n r = self.perform_request('detail', True)\n self.assert_json_schema(r.json(), self.get_details_schema())", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json, self.test_project.get_aoi_geometry_as_geojson())", "def _check_fields(self, content: List[JsonDict]) -> None:\n for c in content:\n self.assertIn(\"id\", c)\n self.assertIn(\"received_ts\", c)\n self.assertIn(\"room_id\", c)\n self.assertIn(\"event_id\", c)\n self.assertIn(\"user_id\", c)\n self.assertIn(\"sender\", c)\n self.assertIn(\"canonical_alias\", c)\n self.assertIn(\"name\", c)\n self.assertIn(\"score\", c)\n self.assertIn(\"reason\", c)", "def test_get_valid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertTrue(isinstance(ars['TotalVisits'], awstats_reader.AttrDict))", "def test_getTLEFromCatalogEntryIsReachable(self):\n response = self.client.get('/api/v1/catalogentry/25544/tle/?time=20170825200000')\n content = response.content.decode('utf8')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n json_data = json.loads(content)\n self.assertTrue(is_correct_json(content))", "def test_api_response(self):\n # url = 'http://127.0.0.1:8000/api/aircraft/'\n url = reverse('airlines:aircraft-list')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_jsonIsCorrect(self):\n\n elements = [\n 'LaunchSite',\n 'OperationalStatus',\n 'OrbitalStatus',\n 'Source',\n 'CatalogEntry',\n 'TLE',\n 'DataSource',\n ]\n\n for element in elements:\n # Dynamicly instanciate the view class\n request = self.factory.get('/api/v1/%s/?format=json' % element.lower())\n view_class = globals()['%sViewSet' % element]\n view = view_class.as_view({'get': 'list'})\n response = view(request).render()\n\n self.assertTrue(is_correct_json(response.content.decode('utf8')))", "def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)", "def test_api_basic(base_url, uri, expected_keys):\n resp = request(\"get\", base_url + uri)\n assert resp.status_code == 200\n assert resp.headers.get(\"content-type\") == \"application/json\"\n data = resp.json()\n for item in expected_keys:\n if isinstance(item, tuple):\n key, sub_keys = item\n else:\n key, sub_keys = item, ()\n assert key in data\n 
for sub_key in sub_keys:\n assert sub_key in data[key]", "def test_validation_error_json():\n error = ValidationError(\n type=\"Syntax Error\",\n data={\"data\": [1, 2, 3]},\n )\n\n assert ValidationError(**json.loads(error.json())) == error", "def test_client_location_endpoint(self):\n endpoint = settings.CLIENT_LOCATION_ENDPOINT\n access_token = config.ACCESS_TOKEN\n self.assertValidGetOicJsonEndpoint(endpoint, access_token)", "def validate_json(data: dict) -> bool:\n try:\n assert \"data\" in data.keys()\n assert isinstance(data[\"data\"], str)\n assert \"command\" in data.keys()\n assert isinstance(data[\"command\"], str)\n assert \"time\" in data.keys()\n assert isinstance(data[\"time\"], str)\n assert \"origin\" in data.keys()\n assert isinstance(data[\"origin\"], str)\n return True\n except AssertionError:\n return False" ]
[ "0.62621653", "0.6240114", "0.6214563", "0.6125648", "0.5942485", "0.5931073", "0.575438", "0.57441986", "0.57291126", "0.57290787", "0.57119447", "0.56926596", "0.5668845", "0.5663258", "0.5649895", "0.5649895", "0.5649045", "0.56264323", "0.5599427", "0.5597548", "0.5595037", "0.55873364", "0.5579084", "0.55725527", "0.55676025", "0.55456424", "0.5538711", "0.55208063", "0.5520564", "0.5511551", "0.5505915", "0.54982024", "0.54914314", "0.54860765", "0.548125", "0.5475418", "0.545438", "0.5447336", "0.5428451", "0.5426639", "0.54241085", "0.54063857", "0.5401016", "0.5399843", "0.5398315", "0.53969395", "0.5391448", "0.5387293", "0.5382341", "0.5380366", "0.5369594", "0.5358542", "0.53520167", "0.5335856", "0.53299344", "0.5323383", "0.5317899", "0.53159374", "0.53034043", "0.52946603", "0.52931243", "0.52804184", "0.5277264", "0.5276948", "0.5268242", "0.52532625", "0.52487594", "0.52475303", "0.5241857", "0.5240947", "0.52323574", "0.5229117", "0.52263004", "0.5225594", "0.52220756", "0.522133", "0.52212983", "0.52197707", "0.52188045", "0.52125156", "0.5211741", "0.5210417", "0.5205214", "0.51918226", "0.51901287", "0.5182207", "0.51792306", "0.51767755", "0.5172101", "0.51690507", "0.51678395", "0.51674426", "0.51635396", "0.5161091", "0.5160582", "0.5156609", "0.51523954", "0.5141716", "0.5140593", "0.5133932" ]
0.6302012
0
Count the number of nonempty dicts/lists or other objects
def recursive_count(o):
    if isinstance(o, dict):
        c = 0
        for v in o.values():
            c += recursive_count(v)
        return c
    elif isinstance(o, list):
        c = 0
        for v in o:
            c += recursive_count(v)
        return c
    else:
        return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count", "def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs", "def count(self):\n return len([i for i in self.iteritems()])", "def cnt(iterable):\n return len(iterable) if iterable is not None else 0", "def get_types_count():\n return len(type_dict.keys())", "def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)", "def get_dict_data_len(x_dict: Dict[Any, Collection]):\n return check_all_same_length(*x_dict.values())", "def __len__(self):\n return len(self._dict)", "def __len__(self):\n return len(self._dict)", "def __len__(self) -> int:\n return len(self._dict)", "def size(self):\n return dict.__len__(self)", "def __len__(self):\n return len(self.data.keys())", "def __len__(self):\n return self._data_dict.__len__()", "def count_dictionary_values(self):\n my_dictionary = {'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n 'B': 34,\n 'C': 12,\n 'D': [7, 8, 9, 6, 4]}\n\n count = 0\n for key, value in my_dictionary.items():\n if isinstance(value, list):\n count += len(value)\n print(\"Number of items in a dictionary value i.e a list :\", count)", "def count(item):\n return len(item)", "def Counts(dict_of_list):\n return {k: len(v) for k, v in dict_of_list.iteritems()}", "def _size_of_dict(dictionary):\n size = len(dictionary.keys())\n for value in dictionary.values():\n size += len(value)\n return size", "def nb_objects(self) -> int:\n return 0", "def count(self):\n return len(self.objects)", "def size(matrix):\n size = 0\n for _,row in matrix.items():\n #size += len([r for r in row.values() if r != None])\n for _,v in row.items():\n #print(\"V:\",v)\n size += 1 if v != None else 0\n return size", "def __len__(self):\n return sum(item['cantidad'] for item in self.carro.values())", "def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def count(self):\n return len(self)", "def size(self):\n return len(LongObjectHashMap.self)", "def size(self):\n return len(LongObjectHashMap.self)", "def get_num_animals():\n return jsonify(len(list(rd.keys(\"*\"))))", "def _dict_values_count_hashed(a_dict, count_this):\n counter = 0\n for value in a_dict.values():\n if value == count_this:\n counter += 1\n elif (\n isinstance(value, dict)\n and isinstance(count_this, dict)\n and \"hash\" in value\n and \"hash\" in count_this\n and \"size\" in value\n and \"size\" in count_this\n and value[\"hash\"] == count_this[\"hash\"]\n ):\n counter += 1\n \"hash\" in value and isinstance(count_this, dict) and \"hash\" in count_this\n return counter", "def number_keys(a_dictionary):\n return(len(a_dictionary))", "def count_objects_of_size(self, n: int, **parameters: int) -> int:", "def __len__(self):\n return self._count()", "def countit(objs):\n out = {}\n for el in objs:\n out[el] = 1 + out.get(el, 0)\n out = {k: v for k, v in out.items()}\n return out", "def __len__(self):\n return len(self.__keys)", "def __len__(self):\n return len(self.__keys)", "def get_number_of_empty_pages(cls, pages, item_key):\n empty = [page for page in pages if page[item_key] == []]\n return len(empty)", "def __len__(self):\n return self.keyvaluepair_set.count()", "def getsize(obj_0):\n _seen_ids = set()\n 
def inner(obj):\n obj_id = id(obj)\n if obj_id in _seen_ids:\n return 0\n _seen_ids.add(obj_id)\n size = sys.getsizeof(obj)\n if isinstance(obj, zero_depth_bases):\n pass # bypass remaining control flow and return\n elif isinstance(obj, (tuple, list, Set, deque)):\n size += sum(inner(i) for i in obj)\n elif isinstance(obj, Mapping) or hasattr(obj, iteritems):\n size += sum(inner(k) + inner(v) for k, v in getattr(obj, iteritems)())\n # Check for custom object instances - may subclass above too\n if hasattr(obj, '__dict__'):\n size += inner(vars(obj))\n if hasattr(obj, '__slots__'): # can have __slots__ with __dict__\n size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))\n return size\n return inner(obj_0)", "def __len__(self):\n return len(self.keys)", "def _get_objects_length(self) -> int:\n return len(self.objects)", "def __len__(self):\n\n return len(self._entries)", "def length(self):\n\t\treturn len(self.object_list)", "def count(self):\n # TODO not implemented yet\n return 0", "def getsize(obj_0):\n _seen_ids = set()\n\n def inner(obj):\n obj_id = id(obj)\n if obj_id in _seen_ids:\n return 0\n _seen_ids.add(obj_id)\n size = sys.getsizeof(obj)\n if isinstance(obj, ZERO_DEPTH_BASES):\n pass # bypass remaining control flow and return\n elif isinstance(obj, (tuple, list, Set, deque)):\n size += sum(inner(i) for i in obj)\n elif isinstance(obj, Mapping) or hasattr(obj, 'items'):\n size += sum(inner(k) + inner(v) for k, v in getattr(obj, 'items')())\n # Check for custom object instances - may subclass above too\n if hasattr(obj, '__dict__'):\n size += inner(vars(obj))\n if hasattr(obj, '__slots__'): # can have __slots__ with __dict__\n size += sum(inner(getattr(obj, s)) for s in obj.__slots__ if hasattr(obj, s))\n return size\n\n return inner(obj_0)", "def size(self) -> int:\n return sum(ob.size for ob in self.objects.ravel())", "def num_keys(self):\n return len(self.counter.keys())", "def __len__(self):\n return len(self._matches.keys())", "def number_objects():\n classes = [Amenity, City, Place, Review, State, User]\n names = [\"amenities\", \"cities\", \"places\", \"reviews\", \"states\", \"users\"]\n\n num_objs = {}\n for i in range(len(classes)):\n num_objs[names[i]] = storage.count(classes[i])\n\n return jsonify(num_objs)", "def count(self):\n return self.size()", "def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())", "def __len__(self) -> int:\n return len(self._keys)", "def __len__(self):\n return len(self.items)", "def __len__(self):\n return len(self.items)", "def __len__(self):\n return len(self.items)", "def __len__(self):\n return len(self.items)", "def count(self, cls=None):\n return len(self.all(cls))", "def count(self):\n objects = self.all()\n return len(objects)", "def count(self, value): # real signature unknown; restored from __doc__\n return 0", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def Count(self) -> int:", "def __len__(self):\n return reduce(operator.add, self.values(), 0)", "def __len__(self):\n return len(list(self.__iter__()))", "def size(self):\n return len(self.items)", "def size(self):\n return len(self.items)", "def size(self):\n return len(self.items)", "def size(self):\n return len(self.items)", "def size(self):\n return len(self.items)", "def do_count(self, args):\n args = shlex.split(args)\n if len(args) < 1:\n return\n _nb_objects = 0\n items = storage.all()\n for key in items:\n if items[key].__class__.__name__ == args[0]:\n _nb_objects += 1\n 
print(_nb_objects)", "def __len__(self):\n return len(self.data[self.name])", "def __len__(self):\n return self.count()", "def __len__(self):\n return len(self.as_list())", "def get_count_value(obj):\n # Passed None value.\n if obj is None:\n return None\n\n # Return 0 if object has no value.\n if obj.GetValue() is None or obj.GetValueAsUnsigned() == 0:\n return 0\n return obj.GetNumChildren()", "def count():", "def __len__(self):\r\n return len(self._items)", "def __len__(self):\r\n return len(self._items)", "def __len__(self):\n return len(self.__values)", "def __len__(self):\n\t\treturn len(self.hitos)", "def __len__(self):\n return len(self._items)", "def __len__(self):\n return len(self._items)", "def __len__(self):\n return len(self._items)", "def __len__(self):\n return len(self._items)", "def __len__(self):\n return len(self._items)", "def __len__(self):\n return len(self._items)", "def _count_shapes(self, shape_data : dict) -> dict:\n shape_count = {}\n for item in shape_data:\n item_shape_type = item.get('shapeType')\n if item_shape_type not in shape_count:\n shape_count[item_shape_type] = 1\n else:\n shape_count[item_shape_type] += 1\n return shape_count", "def __len__(self):\n return self._count", "def __len__(self):\n return self._count", "def count(self):\n\n raise NotImplementedError", "def __len__(self):\n return self.count", "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():", "def length(self):\n return len(self.items)", "def items_count(self):\n return len(self.items)", "def count(self):\n return self.data_container.count", "def __len__(self) -> int:\n return self.objects.size", "def dict_sublength(nest_dict):\n return sum(len(v) for v in nest_dict.itervalues())", "def size(iterable):\n try:\n return iterable.__len__()\n except AttributeError:\n return None", "def pobj_counts(pcode_obj):\n pcode = (pcode_obj.asDict())['pcode'][0] # no multiple pcode blocks - no delimiter\n counts = {'galleries': 0, 'spreads': 0, 'layouts': 0, 'panelgroups': 0}\n # , 'panels': 0, 'skips': 0 }\n galleries = pcode.pop('gallery', '')\n counts['galleries'] = len(galleries)\n for gallery in galleries:\n spreads = gallery.pop('spread', '')\n counts['spreads'] += len(spreads)\n for spread in spreads:\n layouts = spread.pop('layout', '')\n counts['layouts'] += len(layouts)\n for layout in layouts:\n panelgroups = layout.pop('panelgroup', '')\n counts['panelgroups'] += len(panelgroups)\n return counts", "def _count_elements(mapping, iterable): # real signature unknown; restored from __doc__\n pass", "def _get_count(results):\n return len(results)", "def __len__(self):\n return sum(f.count for f in self.filters)" ]
[ "0.71943367", "0.71715474", "0.69646627", "0.6960094", "0.6950782", "0.6901174", "0.6860939", "0.6854527", "0.68178976", "0.6807361", "0.6695624", "0.6688045", "0.6622141", "0.6599806", "0.6558737", "0.6528312", "0.6516975", "0.64872235", "0.6447478", "0.6436087", "0.641087", "0.63838947", "0.6376552", "0.63462913", "0.6342291", "0.6342291", "0.6333362", "0.6329662", "0.6318653", "0.6316472", "0.63138014", "0.6313794", "0.6308746", "0.6308746", "0.6301769", "0.630164", "0.6299032", "0.62956935", "0.6276722", "0.6274876", "0.6265931", "0.62620336", "0.6243121", "0.62408155", "0.6240203", "0.62387025", "0.62376076", "0.6233585", "0.6227692", "0.62031585", "0.61987835", "0.61987835", "0.61987835", "0.61987835", "0.618422", "0.61834645", "0.6177228", "0.615587", "0.615587", "0.615587", "0.615587", "0.61438483", "0.6140656", "0.61371833", "0.61371833", "0.61371833", "0.61371833", "0.61371833", "0.6135869", "0.6128165", "0.6127258", "0.6125218", "0.6114955", "0.6110094", "0.609962", "0.609962", "0.60967445", "0.60913295", "0.60887045", "0.60887045", "0.60887045", "0.60887045", "0.60887045", "0.60887045", "0.6082368", "0.6078339", "0.6078339", "0.60765386", "0.6072266", "0.60717255", "0.60673016", "0.6061431", "0.6054843", "0.6054735", "0.60534817", "0.60414207", "0.6040172", "0.60399574", "0.6035823", "0.60217017" ]
0.7272351
0
Reorganize xarray object a bit for netcdf files
def process_lidar(radial_file, scan_file, wind_file, site, period, netcdf_path):
    lidar = rasp.lidar_from_csv(radial_file, scan_file, wind=wind_file)
    # remove status==0 data (if we have the whole data)
    if 'Status' in lidar.data_vars:
        lidar['CNR'] = lidar['CNR'].where(lidar['Status'])
        lidar['DRWS'] = lidar['DRWS'].where(lidar['Status'])
    # remove unneeded variables if they exist
    to_drop = list(set(lidar.data_vars) & set(['Status', 'Error', 'Confidence', 'RWS']))
    lidar = lidar.drop(to_drop)
    lidar = lidar.rasp.cf_compliant()
    lidar.to_netcdf(netcdf_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_netCDF_to_memory(self):\n f = cfdm.example_field(4)\n f.data.to_memory() # on non-compressed array\n f.compress(\"indexed_contiguous\", inplace=True)\n f.data.to_memory() # on compressed array", "def __init__(self, xarray_obj):\n super(RasterDataArray, self).__init__(xarray_obj)", "def index_xarray_data():\n pressure = xr.DataArray([850., 700., 500.], dims=('isobaric',), attrs={'units': 'hPa'})\n temp = xr.DataArray([[[[296., 295., 294.], [293., 292., 291.]],\n [[286., 285., 284.], [283., 282., 281.]],\n [[276., 275., 274.], [273., 272., 271.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n profile = xr.DataArray([[[[289., 288., 287.], [286., 285., 284.]],\n [[279., 278., 277.], [276., 275., 274.]],\n [[269., 268., 267.], [266., 265., 264.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n dewp = xr.DataArray([[[[294., 293., 292.], [291., 290., 289.]],\n [[284., 283., 282.], [281., 280., 279.]],\n [[274., 273., 272.], [271., 270., 269.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n dirw = xr.DataArray([[[[180., 180., 180.], [180., 180., 180.]],\n [[225., 225., 225.], [225., 225., 225.]],\n [[270., 270., 270.], [270., 270., 270.]]]] * units.degree,\n dims=('time', 'isobaric', 'y', 'x'))\n\n speed = xr.DataArray([[[[20., 20., 20.], [20., 20., 20.]],\n [[25., 25., 25.], [25., 25., 25.]],\n [[50., 50., 50.], [50., 50., 50.]]]] * units.knots,\n dims=('time', 'isobaric', 'y', 'x'))\n\n return xr.Dataset({'temperature': temp, 'profile': profile, 'dewpoint': dewp,\n 'wind_direction': dirw, 'wind_speed': speed},\n coords={'isobaric': pressure, 'time': ['2020-01-01T00:00Z']})", "def getXarray(node,noisy=False, strict=False):\n #assert type(node)==mds.treenode.TreeNode\n data=node.data()\n node_shape=get_mds_shape(node)\n naxes=len(node_shape)\n own_name=get_mds_shortname(node)\n coordinates=OrderedDict() \n units_dict={own_name:get_mds_units(node)}\n dimension_names=[]\n if noisy: print(\"Main body: node %s has shape %s\"%(own_name,node_shape))\n for i in range(naxes):\n ax=node.dim_of(i)\n ax_dims=len(get_mds_shape(ax))#do we have a coordinate or a simple dimension?\n if noisy: print( \"doing axis # %d: shape=%s\"%(i,get_mds_shape(ax)))\n if ax_dims==1:\n if noisy: print( \" inside dims==1\")\n try:\n name=get_mds_shortname(get_mds_node_reference(ax))\n coordinates[name]=((name,),ax.data()) #only give it a coordinate if it might be interesting\n except:\n name=own_name+\"_index\"\n #don't assign a coordinate, because it is presumably just an index if it doesn't have a node reference\n dimension_names.append(name)\n elif ax_dims>1:\n if noisy: print( \" inside dims>1\")\n #time for recursion! Look out!\n coord = getXarray(get_mds_node_reference(ax),noisy=noisy)\n coord_dim_names=set(coord.dims)\n #This is a bit tricky, because the name of the present dimension\n #gets defined by this call. 
So we need to extract that name and\n #place it in the running list of dimensions for 'node':\n unique_dim_names=list(coord_dim_names.difference(set(dimension_names)))\n if len(unique_dim_names)>1:\n raise Exception(\"Coordinate #%d of node %s has %d>1 new dimensions, which is not allowed\"%(i,own_name,len(unique_dim_names)))\n name=get_mds_shortname(get_mds_node_reference(ax))#This thing had better have a name!!!!\n coordinates[name]=coord #refer to the coordinate by its proper name\n dimension_names.append(unique_dim_names[0]) #refer to the dimension that parameterizes this coordinate by whatever name it recieved in the recursive call, assuming that the unique dimension name defined there corresponds to the present dimension of the base array\n else:#zero-dimensional coordinate means index\n name=own_name+\"_index\"\n dimension_names.append(name)\n units_dict[name]=get_mds_units(ax)\n try:\n return xr.DataArray(data,coords=coordinates, dims=dimension_names,attrs={'units':units_dict}) \n except:\n pass\n dimension_names.reverse()\n try:\n return xr.DataArray(data,coords=coordinates, dims=dimension_names,attrs={'units':units_dict}) \n except Exception as ex:#IF something goes wrong, you will at least get the inputs!\n if not strict:\n #print \"WARNING: returning non-xarray object due to error in coordinates.\"\n dummy= Branch(node)\n dummy.dims=dimension_names\n dummy.coords=coordinates\n dummy.units=units_dict\n dummy.node_ref=node\n dummy.data=data\n return dummy\n else:\n raise ex", "def _prepare_proj(self, x):\n b, l, d = x.size()\n return x.view(b, l, self.num_heads, self.d_head).transpose(1, 2).contiguous().view(b * self.num_heads, l,\n self.d_head)", "def make_photon_arrays(path, numevents):\n xcoords = []\n zcoords = []\n \n nsipmarrays = []\n nabsarrays = []\n \n for filename in os.listdir(path):\n\n photondata = np.loadtxt(path+'/'+filename,delimiter=',',usecols=[1,4])\n\n coords = filename[0:8]\n\n arraylen = len(photondata.flatten('F'))\n \n nsipmphotons = photondata.flatten('F')[numevents:arraylen]\n #print(len(nsipmphotons))\n nabsphotons = photondata.flatten('F')[0:numevents] \n \n nsipmarrays.append(nsipmphotons)\n nabsarrays.append(nabsphotons)\n \n x = re.findall('(-[0-9]+)x',coords) \n \n if bool(x) == False:\n x = re.findall('([0-9]+)x', coords)\n \n z = re.findall('(-[0-9]+)z',coords) \n\n if bool(z) == False:\n z = re.findall('([0-9]+)z',coords)\n\n xcoords.append(x[0])\n zcoords.append(z[0])\n \n xcoords = np.array(xcoords).astype(np.float)\n zcoords = np.array(zcoords).astype(np.float)\n \n return xcoords, zcoords, nsipmarrays, nabsarrays", "def patch2X(patch): #X\n return patch.reshape(-1)", "def __init__(self, xarray_obj: xr.DataArray | xr.Dataset) -> None:\n self._obj = xarray_obj\n # create new coordinate with attributes in which to save x_dim, y_dim and crs.\n # other spatial properties are always calculated on the fly to ensure\n # consistency with data\n if GEO_MAP_COORD not in self._obj.coords:\n # zero is used by rioxarray\n self._obj.coords[GEO_MAP_COORD] = xr.Variable((), 0)", "def _raw_to_arrays(self):\n self.update_geometry()\n if isinstance(self, Molecule):\n # normal qcdb.Molecule\n geom = self.geometry(np_out=True)\n else:\n # psi4.core.Molecule\n geom = np.array(self.geometry())\n mass = np.asarray([self.mass(at) for at in range(self.natom())])\n elem = np.asarray([self.symbol(at) for at in range(self.natom())])\n elez = np.asarray([self.Z(at) for at in range(self.natom())])\n uniq = np.asarray(\n [hashlib.sha1((str(elem[at]) + 
str(mass[at])).encode('utf-8')).hexdigest() for at in range(self.natom())])\n\n return geom, mass, elem, elez, uniq", "def write_to_xarray(self,ds=None,mesh_name='mesh'):\n import xarray as xr\n if ds is None:\n ds=xr.Dataset()\n\n ds[mesh_name]=1\n ds[mesh_name].attrs['cf_role']='mesh_topology'\n ds[mesh_name].attrs['node_coordinates']='node_x node_y'\n ds[mesh_name].attrs['face_node_connectivity']='face_node'\n ds[mesh_name].attrs['edge_node_connectivity']='edge_node'\n ds[mesh_name].attrs['face_dimension']='face'\n ds[mesh_name].attrs['edge_dimension']='edge'\n\n ds['node_x']= ( ('node',),self.nodes['x'][:,0])\n ds['node_y']= ( ('node',),self.nodes['x'][:,1])\n\n ds['face_node']= ( ('face','maxnode_per_face'), self.cells['nodes'] )\n\n ds['edge_node']= ( ('edge','node_per_edge'), self.edges['nodes'] )\n\n return ds", "def prepare_test_data():\n # Dictionary in which to store data\n data_dict = {}\n # Load data01.nc Dataset\n data01 = xr.open_dataset(os.path.dirname(__file__)+'/data/data01.nc',\n decode_times=False, autoclose=True)\n data_dict['data01'] = data01.copy()\n # Extract two *DataArrays* - to test functions with DataArrays\n da_ts = data01['TS'].copy()\n da_precl = data01['PRECL'].copy()\n data_dict['da_ts'] = da_ts.copy()\n data_dict['da_precl'] = da_precl.copy()\n # Dataset with *shifted* longitudes\n ds_shift_lon = climapy.xr_shift_lon(data01.copy())\n data_dict['ds_shift_lon'] = ds_shift_lon.copy()\n # Datasets with *reversed* lon/lat coordinates and data\n ds_rev_lon = data01.copy()\n ds_rev_lon['lon'].values = ds_rev_lon['lon'].values[::-1]\n for var_name in ['TS', 'PRECL']: # array order: time, lat, lon\n ds_rev_lon[var_name].values = ds_rev_lon[var_name].values[:, :, ::-1]\n ds_rev_lat = data01.copy()\n ds_rev_lat['lat'].values = ds_rev_lat['lat'].values[::-1]\n for var_name in ['TS', 'PRECL']:\n ds_rev_lat[var_name].values = ds_rev_lat[var_name].values[:, ::-1, :]\n ds_rev_both = data01.copy()\n ds_rev_both['lat'].values = ds_rev_both['lat'].values[::-1]\n ds_rev_both['lon'].values = ds_rev_both['lon'].values[::-1]\n for var_name in ['TS', 'PRECL']:\n ds_rev_both[var_name].values = ds_rev_both[var_name].values[:, ::-1, ::-1]\n data_dict['ds_rev_lon'] = ds_rev_lon.copy()\n data_dict['ds_rev_lat'] = ds_rev_lat.copy()\n data_dict['ds_rev_both'] = ds_rev_both.copy()\n # Dataset with *transposed* lon/lat coords\n ds_transposed = data01.copy()\n ds_transposed = ds_transposed.transpose()\n data_dict['ds_transposed'] = ds_transposed.copy()\n # Dataset with *renamed* longitude and latitude coords\n ds_renamed = data01.copy()\n ds_renamed = ds_renamed.rename({'lon': 'longitude', 'lat': 'latitude'})\n data_dict['ds_renamed'] = ds_renamed.copy()\n # Datasets with slightly *irregular* lon/lat coords, yet still monotonic\n nx, ny = data01['lon'].size, data01['lat'].size\n lon_irr = (data01['lon'].values +\n np_rand.uniform(low=-0.5, high=0.5, size=nx)) # add small amount of noise\n lon_irr[[0, -1]] = data01['lon'].values[[0, -1]] # keep end values unchanged\n lat_irr = (data01['lat'].values +\n np_rand.uniform(low=-0.5, high=0.5, size=ny))\n lat_irr[[0, -1]] = data01['lat'].values[[0, -1]]\n ds_irr_lon = data01.copy()\n ds_irr_lon['lon'].values = lon_irr.copy()\n ds_irr_lat = data01.copy()\n ds_irr_lat['lat'].values = lat_irr.copy()\n ds_irr_both = data01.copy()\n ds_irr_both['lon'].values = lon_irr.copy()\n ds_irr_both['lat'].values = lat_irr.copy()\n data_dict['ds_irr_lon'] = ds_irr_lon.copy()\n data_dict['ds_irr_lat'] = ds_irr_lat.copy()\n data_dict['ds_irr_both'] = 
ds_irr_both.copy()\n # Dataset with *strange* lon/lat coords - very irregular and not monotonic\n lon_strange = (data01['lon'].values +\n np_rand.uniform(low=-10, high=10, size=nx)) # add large amount of noise\n lon_strange[[0, -1]] = data01['lon'].values[[0, -1]] # keep end values unchanged\n lat_strange = (data01['lat'].values + np_rand.uniform(low=-10, high=10, size=ny))\n lat_strange[[0, -1]] = data01['lat'].values[[0, -1]] # keep end values unchanged\n ds_strange = data01.copy()\n ds_strange['lon'].values = lon_strange.copy()\n ds_strange['lat'].values = lat_strange.copy()\n data_dict['ds_strange'] = ds_strange.copy()\n # Return dictionary of data\n return data_dict", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def _diagnostic2xarray(node,names={},reverse_order=None,debug=False,noisy=False):\n #Here's the deal: some trees seem to opposite conventions when it comes to\n #order of indices, ie: if you call node.dim_of(n) with i=0,j=1,k=2, you get \n #dimensions to array[j,k,i] sometimes, but other times you get array[i,j,k]\n #EFIT trees seem to use normal order, others do not, but you may need to\n #adjust this\n raise Exception(\"This function is not yet operational.\")\n if reverse_order is None:\n if 'efit' in str(node).lower():\n reverse_order=False\n else:\n reverse_order=True\n \n if type(node)==mds.Tree:\n #assuming we are dealing with EFIT tree where the variables are all\n #stored as tags and the tree just contains processing info\n members=[node.getNode(tag) for tag in node.findTags('') if not '::TOP' in tag]\n else:\n members = node.getMembers()#get list all the nodes for the diagnostic\n \n #If user supplies list/tuple/array of names, select only those names\n test_names = len(names) > 0\n \n #Sort the members into either coordinates or data arrays (ignore the rest)\n coord_objs=[]\n data_objs=[]\n other_objs=[]\n for member in members:\n try:\n #I'm getting a non-deterministic error here on EFITS, when the call\n #to member.getShape() sometimes causes a TDIException that escapes\n #the 'try' clause somehow! 
\n naxes=len(member.getShape())\n assert naxes >0\n except:\n other_objs.append(member)\n continue #if it doesn't have a shape, or if there are no dimensions, it's not an array, so skip it\n axes_types = [type(member.dim_of(i)) for i in range(naxes)]\n if mds.compound.Range in axes_types:\n coord_objs.append(member)\n else:\n data_objs.append(member)\n if len(other_objs) >0 and noisy:\n print (\"Warning: the following nodes were not used: %s\"%other_objs) \n #Assemble the desired data arrays together with their dimension names to \n #prepare for insertion into an xr.Dataset object\n dvars={}\n labels={}\n units={}\n used_coordinates=set()\n for member in data_objs: \n name=get_mds_shortname(member)\n if not test_names or name in names: \n axes=getAxes(member,strict=True)\n if reverse_order: \n axes.reverse()\n axes_names=[get_mds_shortname(ax) for ax in axes]\n axes_shape_strings=[str(ax.getShape())[1:-1] for ax in axes]\n shapestring=str(member.getShape())\n if not all([axes_shape in shapestring for axes_shape in axes_shape_strings]) and noisy:\n print(\"Warning: Shape mismatch: %s has shape %s, but dimensions %s have shapes %s\"%(member,shapestring,axes_names,axes_shape_strings))\n continue\n units[name]=member.units_of()\n dvars[name]=axes_names, member.data() \n for ax in axes_names:\n used_coordinates.add(ax)\n if noisy:\n unused_names=set(names).difference(set(dvars.keys()))\n if len(unused_names)>0:\n print(\"Warning: these requested names were not found: %s\"%unused_names)\n\n #figure out which of the coordinates we need/have, and add them under the right name\n coords={}\n for member in coord_objs:\n name=get_mds_shortname(member)\n if name in used_coordinates:\n naxes=len(member.getShape())\n axes_names=[]\n if naxes==1:\n axes_names=[name]\n else:\n looper =range(naxes)\n if reverse_order:\n looper.reverse()\n for i in looper:\n ax_obj=get_mds_axis(member,i,strict=False)\n if not isinstance(ax_obj,mds.treenode.TreeNode):\n axes_names.append(name+'_index')\n else:\n axes_names.append(get_mds_shortname(ax_obj))\n coords[name]=axes_names,member.data()\n #protocol for metadata seems to be something like this:\n #valid=member_dict['valid']\n #valid_remark = valid.getMember().value_of() \n if debug: \n return dvars,coords,labels,units\n return xr.Dataset(dvars,coords=coords,attrs={'labels':labels,'units':units})", "def to_xarray(self):\n\n xarray_object = xr.Dataset()\n for var, data in self.items(deep=True):\n xarray_object[var] = data.to_xarray()\n\n xarray_object.attrs.update(**self.attrs)\n\n return xarray_object", "def to_xarray(self, errors: str = 'ignore'):\n\n # Download data\n if not self.parallel:\n if len(self.uri) == 1:\n ds = self.fs.open_dataset(self.uri[0])\n else:\n ds = self.fs.open_mfdataset(\n self.uri, method=\"sequential\", progress=self.progress, errors=errors\n )\n else:\n ds = self.fs.open_mfdataset(\n self.uri, method=self.parallel_method, progress=self.progress, errors=errors\n )\n\n ds = ds.rename({\"row\": \"N_POINTS\"})\n\n # Post-process the xarray.DataSet:\n\n # Set coordinates:\n coords = (\"LATITUDE\", \"LONGITUDE\", \"TIME\", \"N_POINTS\")\n ds = ds.reset_coords()\n ds[\"N_POINTS\"] = ds[\"N_POINTS\"]\n # Convert all coordinate variable names to upper case\n for v in ds.data_vars:\n ds = ds.rename({v: v.upper()})\n ds = ds.set_coords(coords)\n\n # Cast data types and add variable attributes (not available in the csv download):\n ds = ds.argo.cast_types()\n ds = self._add_attributes(ds)\n\n # More convention:\n # ds = ds.rename({'pres': 
'pressure'})\n\n # Remove erddap file attributes and replace them with argopy ones:\n ds.attrs = {}\n if self.dataset_id == \"phy\":\n ds.attrs[\"DATA_ID\"] = \"ARGO\"\n elif self.dataset_id == \"ref\":\n ds.attrs[\"DATA_ID\"] = \"ARGO_Reference\"\n elif self.dataset_id == \"bgc\":\n ds.attrs[\"DATA_ID\"] = \"ARGO-BGC\"\n ds.attrs[\"DOI\"] = \"http://doi.org/10.17882/42182\"\n ds.attrs[\"Fetched_from\"] = self.erddap.server\n ds.attrs[\"Fetched_by\"] = getpass.getuser()\n ds.attrs[\"Fetched_date\"] = pd.to_datetime(\"now\").strftime(\"%Y/%m/%d\")\n ds.attrs[\"Fetched_constraints\"] = self.cname()\n ds.attrs[\"Fetched_uri\"] = self.uri\n ds = ds[np.sort(ds.data_vars)]\n\n #\n return ds", "def __init__(self, xarray_obj):\n super(XRasterBase, self).__init__(xarray_obj)", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the y component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def netcdf_compatible_array(arry):\n arry = strip_array_wrappers(arry)\n\n if arry.ndim > 0:\n for _ in range(3):\n if arry.dtype.char != \"O\" or arry.ndim == 0:\n break\n\n if arry.shape[0] == 1:\n arry = np.array(arry[0])\n 
else:\n arry = np.array(tuple(arry))\n\n if \"S\" in arry.dtype.char:\n return np.char.decode(arry, \"ascii\")\n # TODO: ensure no float16, ...\n return arry", "def to_cdo_grid(self, outfile):", "def array_to_raster_noTi(x_pix,y_pix,pixSize,x_min,y_max,proj,array,outFile):\n \n\n driver = gdal.GetDriverByName('GTiff')\n\n outDataset = driver.Create(\n outFile,\n x_pix,\n y_pix,\n 1,\n gdal.GDT_Float32, )\n\n outDataset.SetGeoTransform((\n x_min, # 0 * top left border of pixel\n pixSize, # 1\n 0, # 2\n y_max, # 3 top left border of pixel\n 0, # 4\n -pixSize)) \n\n projx = osr.SpatialReference()\n projx.SetWellKnownGeogCS( proj ) #Get the long coordinate system name\n wkt_projection = projx.ExportToWkt()\n\n outDataset.SetProjection(wkt_projection)\n outDataset.GetRasterBand(1).WriteArray(array)\n outDataset.FlushCache() # Write to disk.\n return outDataset, outDataset.GetRasterBand(1) #If you need to return, remenber to return also the dataset because the band don`t live without dataset.", "def to_netcdf(self, path=None):\n if path is None:\n path = self.path\n\n for (t_i, m_i, e_i), channel in zip(self.channels(),\n range(self.n_channels)):\n dataset = xr.Dataset({'measurement': (['i'], m_i)})\n # Store meta_features, name, target in first group only\n if channel == 0:\n meta_feat_series = pd.Series(self.meta_features)\n dataset['meta_features'] = xr.DataArray(meta_feat_series,\n dims='feature')\n if self.name:\n dataset.attrs['name'] = self.name\n if self.target:\n dataset.attrs['target'] = self.target\n # If time is a 1d array, only store once (in the first group)\n if isinstance(self.time, np.ndarray) and self.time.ndim == 1:\n if channel == 0:\n dataset['time'] = (['i'], t_i)\n # Otherwise time is multi-dimensional; store it for every channel\n else:\n dataset['time'] = (['i'], t_i)\n # Same logic as above for time\n if isinstance(self.error, np.ndarray) and self.error.ndim == 1:\n if channel == 0:\n dataset['error'] = (['i'], e_i)\n else:\n dataset['error'] = (['i'], e_i)\n\n # xarray won't append to a netCDF file that doesn't exist yet\n file_open_mode = 'w' if channel == 0 else 'a'\n dataset.to_netcdf(path, group=self.channel_names[channel],\n engine='netcdf4', mode=file_open_mode)", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def dask_data_to_xarray(self, df, var=None):\n\n lazy_values = [dask.delayed(df[dim].unique()) for dim in self.DIMS]\n dims_values = [future for future in dask.compute(*lazy_values)]\n shape = tuple([len(x) for x in dims_values])\n\n var_array = df[var].values\n var_array.compute_chunk_sizes()\n var_array_reshape = var_array.reshape(shape)\n tuple_data = (self.DIMS, var_array_reshape)\n\n coords_dict = dict(zip(self.DIMS, dims_values))\n #values_dicts = dict(zip(extract_vars, values_arrays))\n\n xarr = xr.DataArray(var_array_reshape, \n coords=dims_values,\n dims=self.DIMS)\n\n return xarr.sortby(['lat', 'lon'])", "def reformat(dataset):\n x = dataset[:, 1] \n x = 
np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y", "def CreateDataArray(name, shape, cDims, type):\n # Create a numpy array of ones to hold our data\n num_array = np.ndarray(shape, dtype=type, order=\"C\")\n\n z = np.asarray(num_array)\n if not z.flags.contiguous:\n z = np.ascontiguousarray(z)\n z.fill(0)\n\n shape = z.shape\n assert z.flags.contiguous, 'Only contiguous arrays are supported.'\n assert not np.issubdtype(z.dtype, np.complex128), \\\n \"Complex numpy arrays cannot be converted to vtk arrays.\"\\\n \"Use real() or imag() to get a component of the array before\"\\\n \" passing it to vtk.\"\n\n # Get the Pointer to the numpy array\n z_flat = np.ravel(z)\n \n #np.info(z)\n \n # Declare the number of components for the array\n if type == np.int8:\n array = simpl.Int8ArrayType(z_flat, cDims, name, False)\n elif type == np.uint8:\n array = simpl.UInt8ArrayType(z_flat, cDims, name, False)\n elif type == np.int16:\n array = simpl.Int16ArrayType(z_flat, cDims, name, False)\n elif type == np.uint16:\n array = simpl.UInt16ArrayType(z_flat, cDims, name, False)\n elif type == np.int32:\n array = simpl.Int32ArrayType(z_flat, cDims, name, False)\n elif type == np.uint32:\n array = simpl.UInt32ArrayType(z_flat, cDims, name, False)\n elif type == np.int64:\n array = simpl.Int64ArrayType(z_flat, cDims, name, False)\n elif type == np.uint64:\n array = simpl.UInt64ArrayType(z_flat, cDims, name, False)\n elif type == np.float32:\n array = simpl.FloatArrayType(z_flat, cDims, name, False)\n elif type == np.double:\n array = simpl.DoubleArrayType(z_flat, cDims, name, False) \n \n # we need to return the 'z' numpy array so it does not go out of scope.\n return (z, array)", "def nc_encode(ds):\n for var in data_vars:\n ds[var].encoding[\"_FillValue\"] = 1.0e20\n # ds[var].encoding[\"coordinates\"] = \"{} {}\".format(lon, lat)\n for coord in ds.coords.values():\n coord.encoding[\"_FillValue\"] = None\n for coord in [lon_vertices, lat_vertices]:\n ds[coord].encoding = {\"_FillValue\": None}\n return ds", "def make_xarray_coords(y, x, crs):\n if crs.is_geographic:\n y_attrs, x_attrs = COORD_DEFS['latitude'], COORD_DEFS['longitude']\n elif crs.is_projected:\n crs_osr = crs2osr(crs)\n units = crs_osr.GetLinearUnitsName()\n y_attrs, x_attrs = COORD_DEFS['y'], COORD_DEFS['x']\n y_attrs['units'], x_attrs['units'] = units, units\n\n y = xr.Variable(('y', ), y, attrs=y_attrs)\n x = xr.Variable(('x', ), x, attrs=x_attrs)\n\n return y, x", "def preprocess_xarray(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n args = tuple(a.metpy.unit_array if isinstance(a, xr.DataArray) else a for a in args)\n kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)\n for name, v in kwargs.items()}\n return func(*args, **kwargs)\n return wrapper", "def read_mmclx_save_nc(mmclx_filename, nc_filename):\n\n # Open .mmclx file\n mmclx = xr.open_dataset(mmclx_filename)\n\n # Create .nc file with necessary variables\n ncfile = mmclx[\n [\n \"Ze\",\n \"VEL\",\n \"LDR\",\n \"RHO\",\n \"DPS\",\n \"NyquistVelocity\",\n \"nave\",\n \"nfft\",\n \"prf\",\n \"zrg\",\n \"drg\",\n ]\n ]\n # Invert order of dimensions\n ncfile = ncfile.transpose()\n\n # Convert + change attributes of time dimension\n ncfile[\"time\"] = (\n \"time\",\n pd.to_datetime(ncfile.time.values, unit=\"s\"),\n {\"long_name\": \"Time 
in epoch reference\", \"standard_name\": \"time\"},\n )\n # Change attributes of range dimension\n ncfile[\"range\"] = ncfile.range.assign_attrs(\n {\n \"long_name\": \"Range from antenna to the centre of each range gate\",\n \"standard_name\": \"range\",\n }\n )\n\n # Create additional instrument parameters\n ncfile[\"pulse_width\"] = float(\n ncfile.hrd[\n (ncfile.hrd.find(\"\\nPULSE_WIDTH:\") + 15) : ncfile.hrd.find(\n \"\\nRX_PULSEWIDTH:\"\n )\n ]\n )\n ncfile[\"pulse_width\"] = ncfile.pulse_width.assign_attrs(\n {\n \"long_name\": \"Pulse Width\",\n \"units\": \"s\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"hrd['PULSE_WIDTH']\",\n }\n )\n ncfile[\"prt\"] = 2e-4\n ncfile[\"prt\"] = ncfile.prt.assign_attrs(\n {\n \"long_name\": \"Pulse repetition time\",\n \"units\": \"s\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"\",\n }\n )\n ncfile[\"frequency\"] = 35e9\n ncfile[\"frequency\"] = ncfile.frequency.assign_attrs(\n {\n \"long_name\": \"Radiation Frequency\",\n \"units\": \"s^-1\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"\",\n }\n )\n ncfile[\"latitude\"] = float(ncfile.Latitude[:-1])\n ncfile[\"latitude\"] = ncfile.latitude.assign_attrs(\n {\n \"long_name\": \"Latitude\",\n \"units\": \"degrees_north\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Latitude\",\n }\n )\n ncfile[\"longitude\"] = float(ncfile.Longitude[:-1])\n ncfile[\"longitude\"] = ncfile.longitude.assign_attrs(\n {\n \"long_name\": \"Longitude\",\n \"units\": \"degrees_east\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Longitude\",\n }\n )\n ncfile[\"altitude\"] = float(ncfile.Altitude[:-1])\n ncfile[\"altitude\"] = ncfile.altitude.assign_attrs(\n {\n \"long_name\": \"Altitude\",\n \"units\": \"m\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"Altitude\",\n }\n )\n\n # Change names of variables\n ncfile = ncfile.rename_vars(\n {\n \"Ze\": \"filtered_reflectivity\",\n \"VEL\": \"filtered_velocity\",\n \"LDR\": \"filtered_linear_depolarization_ratio\",\n \"RHO\": \"cross_correlation_ratio\",\n \"DPS\": \"differential_phase\",\n \"NyquistVelocity\": \"nyquist_velocity\",\n \"nave\": \"n_samples\",\n \"nfft\": \"n_fft\",\n \"zrg\": \"n_range_gates\",\n \"drg\": \"range_resolution\",\n }\n )\n\n # Convert dBZ variables to dBZ\n ncfile.filtered_reflectivity.values = 10 * np.log10(\n ncfile.filtered_reflectivity.values\n )\n ncfile.filtered_linear_depolarization_ratio.values = 10 * np.log10(\n ncfile.filtered_linear_depolarization_ratio.values\n )\n\n # Change attributes of variables\n ncfile[\"filtered_reflectivity\"] = ncfile.filtered_reflectivity.assign_attrs(\n {\n \"units\": \"dBZ\",\n \"valid_range\": ncfile.filtered_reflectivity.attrs.pop(\"yrange\"),\n \"standard_name\": \"Ze\",\n \"long_name\": \"Filtered Equivalent Reflectivity Factor\",\n }\n )\n ncfile[\"filtered_velocity\"] = ncfile.filtered_velocity.assign_attrs(\n {\n \"units\": \"m * s^-1\",\n \"valid_range\": ncfile.filtered_velocity.attrs.pop(\"yrange\"),\n \"standard_name\": \"VEL\",\n \"long_name\": \"Filtered Mean Doppler Velocity\",\n }\n )\n ncfile[\n \"filtered_linear_depolarization_ratio\"\n ] = ncfile.filtered_linear_depolarization_ratio.assign_attrs(\n {\n \"units\": \"dB\",\n \"valid_range\": ncfile.filtered_linear_depolarization_ratio.attrs.pop(\n \"yrange\"\n ),\n \"standard_name\": \"LDR\",\n \"long_name\": \"Filtered Linear De-Polarization Ratio\",\n }\n )\n ncfile[\n \"cross_correlation_ratio\"\n 
] = ncfile.cross_correlation_ratio.assign_attrs(\n {\n \"units\": \"unitless\",\n \"valid_range\": ncfile.cross_correlation_ratio.attrs.pop(\"yrange\"),\n \"standard_name\": \"RHO\",\n \"long_name\": \"Co-Cross Correlation Ratio\",\n }\n )\n ncfile[\"differential_phase\"] = ncfile.differential_phase.assign_attrs(\n {\n \"units\": \"degrees\",\n \"valid_range\": ncfile.differential_phase.attrs.pop(\"yrange\"),\n \"standard_name\": \"DPS\",\n \"long_name\": \"Differential Phase\",\n }\n )\n ncfile[\"nyquist_velocity\"] = ncfile.nyquist_velocity.assign_attrs(\n {\n \"units\": \"m * s^-1\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"NyquistVelocity\",\n \"long_name\": \"Nyquist Velocity\",\n }\n )\n ncfile[\"n_samples\"] = ncfile.n_samples.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"nave\",\n \"long_name\": \"Number of spectral averages used to compute moments\",\n }\n )\n ncfile[\"n_fft\"] = ncfile.n_fft.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"nfft\",\n \"long_name\": \"Number of FFT points\",\n }\n )\n ncfile[\"prf\"] = ncfile.prf.assign_attrs(\n {\n \"units\": \"Hz\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"prf\",\n \"long_name\": \"Pulse Repetition Frequency\",\n }\n )\n ncfile[\"n_range_gates\"] = ncfile.n_range_gates.assign_attrs(\n {\n \"units\": \"unitless\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"zrg\",\n \"long_name\": \"Number of range gates\",\n }\n )\n ncfile[\"range_resolution\"] = ncfile.range_resolution.assign_attrs(\n {\n \"units\": \"m\",\n \"meta_group\": \"instrument_parameters\",\n \"standard_name\": \"drg\",\n \"long_name\": \"Range resolution\",\n }\n )\n\n # Remove unnecessary attributes of variables\n for var in ncfile.variables:\n for attr in [\"axis\", \"db\", \"unit\"]:\n if attr in ncfile[var].attrs.keys():\n del ncfile[var].attrs[attr]\n\n # Remove unnecessary global attributes\n for attr in [\"Altitude\", \"Latitude\", \"Longitude\", \"ppar\", \"hrd\"]:\n del ncfile.attrs[attr]\n\n # Reorder variables\n ncfile = ncfile[\n [\n \"filtered_reflectivity\",\n \"filtered_velocity\",\n \"filtered_linear_depolarization_ratio\",\n \"cross_correlation_ratio\",\n \"differential_phase\",\n \"nyquist_velocity\",\n \"n_fft\",\n \"prf\",\n \"prt\",\n \"n_range_gates\",\n \"range_resolution\",\n \"n_samples\",\n \"pulse_width\",\n \"frequency\",\n \"latitude\",\n \"longitude\",\n \"altitude\",\n ]\n ]\n\n # Save .nc file\n ncfile.to_netcdf(\n nc_filename,\n unlimited_dims=\"time\",\n encoding={\"time\": {\"units\": \"seconds since 1970-01-01 00:00:00\"}},\n )", "def _prepare_data(self, coords):\n return np.array([coords])", "def npz_to_array(npzfile):\n nitems = len(npzfile.keys())\n return [npzfile['arr_%s' % i] for i in range(nitems)]", "def test_dimension_mapping(self):\n fh = NetCDF4()\n\n with tempfile.TemporaryDirectory() as tdir:\n tfile = os.path.join(tdir, 'testfile')\n before = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\":\n (\"group1/subgroup1/dim2\", np.arange(5)),\n \"group2/var1\": (\"group2/dim1\", np.arange(5)),\n \"group2/subgroup1/var1\":\n (\"group1/subgroup1/dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", 
np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n # Save the dataset and load it again:\n fh.write(before, tfile)\n after = fh.read(tfile)\n\n # How it should be after loading:\n check = xr.Dataset({\n \"var1\": (\"dim1\", np.arange(5)),\n \"group1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group1/subgroup1/var1\": (\"group1/dim1\", np.arange(5)),\n \"group1/subgroup1/var2\": (\"group1/dim2\", np.arange(5)),\n \"group2/var1\": (\"dim1\", np.arange(5)),\n \"group2/subgroup1/var1\": (\"dim1\", np.arange(5)),\n \"group3/var1\": (\"group3/dim1\", np.arange(10)),\n }, coords={\n \"dim1\": (\"dim1\", np.arange(5)),\n \"group1/dim1\": (\"group1/dim1\", np.arange(5))\n })\n\n assert after.equals(check)", "def write_merged_file(self):\n \n #out_name = os.getcwd() + '/FAST_INDEX_merged_' + [ x for x in self.datasets[ list(self.datasets_keys)[0]].split('/') if '.nc' in x ] [0] \n \n \"\"\" Loading the econding of variables created from the harvester script \"\"\"\n encodings = np.load('groups_encodings.npy' , allow_pickle = True ).item()\n \n if not os.path.isdir(self.out_dir):\n Path(self.out_dir).mkdir(parents=True, exist_ok=True)\n \n out_name = self.out_dir + '/' + self.station + '_CEUAS_merged_v0.nc' \n \n logging.info('Writing the observations_tables to the netCDF output via xarray to_netcdf() ')\n #obs_tab = self.MergedObs[ ['date_time' , 'latitude', 'longitude' , 'observation_value' , 'observed_variable' , 'source_id' , 'observation_id', 'z_coordinate' ] ] # including only some columns \n obs_tab = self.MergedObs # including only some columns \n obs_tab = self.add_cdm_missing_columns(obs_tab) \n \n \"\"\" \n # Old using xarray\n obs_tab = obs_tab.to_xarray() \n for v in obs_tab.variables:\n if v == \"index\" or v == \"hdrlen\" or 'string' in v:\n continue\n obs_tab[v].attrs['external_table'] = self.attributes['observations_table'][v]['external_table']\n obs_tab[v].attrs['description'] = self.attributes['observations_table'][v]['description']\n \"\"\"\n\n for k in obs_tab.columns:\n print('Writing the observation table using h5py new method for the variable: ' , k )\n df = obs_tab[ [k] ] # making a 1 column dataframe \n write_dict_h5(out_name, df, k, encodings['observations_table'], var_selection=[], mode='a', attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')})\n \n #obs_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='w' , group = 'observations_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the header_table to the netCDF output via xarray ')\n head_tab = self.MergedHead.to_xarray()\n for v in head_tab.variables: \n if v == \"index\" or v == \"hdrlen\" or v == \"string80\":\n continue\n head_tab[v].attrs['external_table'] = self.attributes['header_table'][v]['external_table']\n head_tab[v].attrs['description'] = self.attributes['header_table'][v]['description']\n \n head_tab.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = 'header_table') # writing the merged observations_table \n \n \n \n logging.info('Writing the station_configuration and source_configurations tables to the netCDF output via xarray ') \n for k in self.data.keys():\n if k == 'cdm_tables':\n continue \n group_name = k + '_station_configuration'\n sc = self.data[k]['station_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n 
group_name = k + '_source_configuration'\n sc = self.data[k]['source_configuration'].to_xarray()\n sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n \"\"\" To be fixed ! \"\"\"\n #group_name = k + '_source_configuration'\n #sc = self.data[k]['source_configuration'][:1].to_xarray()\n #sc.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name ) \n \n logging.info('Writing the merged record indices to the netCDF output ') \n di = self.MergedRecordIndex\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a')\n \n logging.info('Writing the merged feedback to the netCDF output ') \n group_name = 'era5fb' \n di = self.MergedFeedback\n di = di.to_xarray()\n di.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = group_name )\n \n logging.info('Writing the standard cdm tables to the netCDF output ') \n for t in self.data['cdm_tables'].keys(): \n d = self.data['cdm_tables'][t]\n d.to_netcdf(out_name, format='netCDF4', engine='h5netcdf', mode='a' , group = t )\n \n logging.info('*** Done writing the output netCDF file ')", "def get_data():\r\n spatial_expmat = np.load('/home/anniegao/spatial_magan/data/spatial_pca_with_coords.npz')['arr_0']\r\n spatial_expmat[:,100:] *= 5\r\n rna_expmat = np.load('/home/anniegao/spatial_magan/data/rna_pca_sampled.npz')['arr_0']\r\n spatial_pca_components = np.load('/home/anniegao/spatial_magan/data/spatial_pca_100components.npz')['arr_0']\r\n rna_pca_components = np.load('/home/anniegao/spatial_magan/data/rna_pca_100components.npz')['arr_0']\r\n spatial_cluster_labels = np.load('/home/anniegao/spatial_magan/data/spatial_cluster_3_labels_phate.npz')['arr_0']\r\n rna_cluster_labels = np.load('/home/anniegao/spatial_magan/data/rna_cluster_5_labels_sampled.npz')['arr_0']\r\n return spatial_expmat, rna_expmat, spatial_pca_components, rna_pca_components, spatial_cluster_labels, rna_cluster_labels", "def to_netcdf(self, outfile):", "def to_xarray(self, searchString, remove_grib=True):\n print('nothing here yet')\n pass", "def saveDailyBlobs():\n\n msgfile = '/users/global/cornkle/MCSfiles/blob_map_allscales_-50_JJAS_points_dominant.nc'\n msg = xr.open_dataarray(msgfile)\n\n # def first_nozero(array_like, axis):\n # array_like[array_like<16]= array_like[array_like<16]+24\n # return np.nanmin(array_like,axis=axis)\n\n msg.values[msg.values > 75] = np.nan\n msg.values[msg.values == 0] = np.nan\n\n for m in msg:\n if m['time.hour'].values >= 16:\n m.values[m > 0] = m['time.hour'].values\n else:\n m.values[m > 0] = m['time.hour'].values+24\n\n ### this is useful, it removes all pixels which got rain twice on a day\n md = msg.resample('24H', base=16, dim='time', skipna=True, how='min')\n\n md = md[(md['time.month'] >=6) & (md['time.month'] <=9)]\n\n md.values[md.values>23] = md.values[md.values>23]-24\n\n md.to_netcdf('/users/global/cornkle/MCSfiles/blob_map_allscales_-50_JJAS_points_dominant_daily.nc')", "def dcm_to_npy(folder, start=1, stop=280, mid_slice=190, dim=120, energies=['40kVp', '80kVp'],\n load_directory=r'D:\\OneDrive - University of Victoria\\Research/CBCT/',\n save_directory=r'D:\\OneDrive - University of Victoria\\Research/CBCT/'):\n path = load_directory + folder + '/'\n save_path = save_directory + folder + '/'\n\n # Create the folder in the save_directory\n gof.create_folder(folder_name=folder, directory_path=save_directory)\n\n # Create the 'RawMatrices' folder\n gof.create_folder(folder_name='RawMatrices', directory_path=save_path)\n\n 
save_path = save_path + 'RawMatrices/'\n\n # Save each slice as .npy matrix\n for energy in energies:\n\n dirs3 = os.listdir(save_path)\n\n # Create the energy folder in the RawMatrices folder\n gof.create_folder(folder_name=energy, directory_path=save_path)\n\n save_path = save_path + energy + '/'\n\n # Sub file path\n subpath = energy + '/Mouse_Cropped.xst/'\n # Load the mid_slice view to find the edges of the phantom\n data = pyd.dcmread(path + subpath + 'volume0' + mid_slice + '.dcm')\n s6 = data.pixel_array\n\n # Center the image for cropping\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.imshow(s6)\n ax.set_title('Click the edges of the phantom in the order: top, bottom, left, right. '\n '\\n Left-click: add point, Right-click: remove point, Enter: stop collecting')\n\n # Array to hold the coordinates of the center of the ROI and its radius\n # Left-click to add point, right-click to remove point, press enter to stop collecting\n # First point\n coords = plt.ginput(n=-1, timeout=-1, show_clicks=True)\n coords = np.array(coords)\n coords = np.round(coords, decimals=0)\n top = coords[0]\n bottom = coords[1]\n left = coords[2]\n right = coords[3]\n # Round the center coordinates to index numbers\n x = int(round((right[0]+left[0])/2))\n y = int(round((bottom[1]+top[1])/2))\n\n for i in np.arange(start, stop):\n if i < 10:\n filename = 'volume000' + str(i) + '.dcm'\n savename = 'volume000' + str(i) + '.npy'\n elif i < 100 and i >= 10:\n filename = 'volume00' + str(i) + '.dcm'\n savename = 'volume00' + str(i) + '.npy'\n else:\n filename = 'volume0' + str(i) + '.dcm'\n savename = 'volume0' + str(i) + '.npy'\n\n # Crop image\n crop = dim/2\n data = pyd.dcmread(path+subpath+filename)\n matrix = data.pixel_array\n matrix = matrix[y-crop:y+crop, x-crop:x+crop]\n np.save(save_path+savename, matrix)", "def createncfile(dz_id,t,x,z):\n db = labdb.LabDB()\n #create the directory in which to store the nc file\n sql = \"\"\"INSERT into dn2t (dz_id) VALUES (%d)\"\"\" % (dz_id) \n db.execute(sql)\n sql = \"\"\"SELECT LAST_INSERT_ID()\"\"\" \n rows = db.execute(sql)\n dn2t_id = rows[0][0]\n dn2t_path = \"/Volumes/HD4/dn2t/%d\" % dn2t_id \n os.mkdir(dn2t_path)\n\n dn2t_filename = os.path.join(dn2t_path,\"dn2t.nc\")\n print(\"d(N2)/dt filename : \",dn2t_filename)\n\n\n # Declare the nc file for the first time\n nc = netCDF4.Dataset(dn2t_filename,'w',format = 'NETCDF4')\n row_dim = nc.createDimension('row',964)\n col_dim = nc.createDimension('column',1292)\n lenT=t.shape[0] #lenT is the length of the dn2t file.Its 1 element shorter in time axis than deltaN2\n print(\"time axis length\",lenT) # debug info\n t_dim = nc.createDimension('time',lenT)\n\n # Dimensions are also variable\n ROW = nc.createVariable('row',numpy.float32,('row'))\n print(list(nc.dimensions.keys()), ROW.shape,ROW.dtype)\n COLUMN = nc.createVariable('column',numpy.float32,('column'))\n print(list(nc.dimensions.keys()) , COLUMN.shape, COLUMN.dtype)\n TIME = nc.createVariable('time',numpy.float32,('time'))\n print(list(nc.dimensions.keys()) ,TIME.shape, TIME.dtype)\n\n # declare the 3D data variable \n dn2t = nc.createVariable('dn2t_array',numpy.float32,('time','row','column'))\n print(list(nc.dimensions.keys()) ,dn2t.shape,dn2t.dtype)\n\n # assign the values\n TIME[:] = t\n ROW[:] = z\n COLUMN[:] = x\n\n nc.close()\n db.commit()\n return dn2t_id,dn2t_filename", "def shortarray_to_array(self, x):\n n_fit_p = len(self.fit_parameters)\n n_nui_p = len(self.nuisance_parameters)\n n_wc = len(self.fit_wc_names)\n arr = np.zeros(n_fit_p + 
n_nui_p + n_wc)\n arr[:n_fit_p] = x[:n_fit_p]\n arr[n_fit_p:n_fit_p+n_nui_p] = self.get_central_nuisance_parameters\n arr[n_fit_p+n_nui_p:] = x[n_fit_p:]\n return arr", "def _unpack_drex_rdata(self, data, ngr):\n\n self.g_ol = np.copy(data[0:3*3*ngr].reshape((3,3,ngr)).T)\n self.g_en = np.copy(data[3*3*ngr:2*3*3*ngr].reshape((3,3,ngr)).T)\n self.volfrac_ol = np.copy(data[2*3*3*ngr:2*3*3*ngr+ngr])\n self.volfrac_en = np.copy(data[2*3*3*ngr+ngr:2*3*3*ngr+2*ngr])\n self.fraction_olivine = np.copy(data[3*3*ngr*2+ngr*4+10])", "def copy_nc_attrs(src, dest):\n with xarray.open_dataset(src) as s:\n attrs = s.attrs\n # Write empty root dataset with attributes\n ds = xarray.Dataset(attrs=attrs)\n ds.to_netcdf(dest, mode=\"a\")", "def nctoflt(ncfile, fltstem, varname, iz=0):\n\n ncobj = nh.nc3_open(ncfile,'r')\n a = ncobj.variables[varname]\n # Copy out into a numpy array and make sure we have only\n # 2 dimensions and type float32.\n b = numpy.float32(ncobj.variables[varname])\n if len(b.shape) < 2 or len(b.shape) > 3:\n raise ValueError(\"Only 2D and 3D data allowed (not \"+len(b.shape)+\"D)\")\n if len(b.shape) == 3:\n b = numpy.float32(b[iz,::,::].reshape(b.shape[1], b.shape[2]))\n fillValue = numpy.float32(ncobj.variables[varname]._FillValue)\n\n latvec = ncobj.variables['latitude']\n lonvec = ncobj.variables['longitude']\n lat1 = latvec[0]\n lat2 = latvec[len(latvec)-1]\n # Reverse if latitude runs South to North\n if lat1 < lat2:\n x = lat2\n lat2 = lat1\n lat1 = x\n b = b[::-1,]\n lon1 = lonvec[0]\n lon2 = lonvec[len(lonvec)-1]\n \n dlat = abs(lat1-lat2)/(len(latvec)-1)\n dlon = abs(lon2-lon1)/(len(lonvec)-1)\n xll = lon1-dlon*0.5\n yll = lat2-dlat*0.5\n\n\n fltname = fltstem+'.flt'\n if os.path.exists(fltname): os.unlink(fltname)\n b.tofile(fltname)\n f = file(fltstem+\".hdr\",\"w\")\n \n f.write(\"ncols %d\\n\" % b.shape[1])\n f.write(\"nrows %d\\n\" % b.shape[0])\n f.write(\"xllcorner %f\\n\" % xll)\n f.write(\"yllcorner %f\\n\" % yll)\n f.write(\"cellsize %f\\n\" % dlon)\n f.write(\"NODATA_value %f\\n\" % fillValue)\n if sys.byteorder == \"little\":\n f.write(\"byteorder LSBFIRST\\n\")\n else:\n f.write(\"byteorder LSBLAST\\n\")\n f.close()\n attr = nh.nc3_get_attributes(ncobj)\n nh.nc3_close(ncobj)\n return attr", "def order_indexes(dataarray: xr.DataArray, index_list: list) -> np.ndarray:\n\n dim_list = list(dataarray.dims)\n print(\"index_list\", index_list)\n print(\"list(dataaray.dims)\", dim_list)\n init_list = []\n\n for dim in dim_list:\n init_list.append(index_list.index(dim))\n\n print(\"init_list\", init_list)\n fin_list = list(range(len(dim_list)))\n dataarray_values = np.moveaxis(dataarray.values, init_list, fin_list)\n\n return dataarray_values", "def _readXarrayFile(self, var_ids=None, exclude_vars=None, exclude_bounds=True):\r\n exclude_vars = exclude_vars or []\r\n\r\n ds = xr.open_dataset(self.nc_file, use_cftime=True, decode_timedelta=False)\r\n xr_variables = []\r\n\r\n # Make sure var_ids is a list\r\n if isinstance(var_ids, str):\r\n var_ids = [var_ids]\r\n\r\n # Identify bounds variables\r\n bounds_vars = {ds[var_id].attrs.get(\"bounds\", None) for var_id in ds.variables}\r\n\r\n if None in bounds_vars:\r\n bounds_vars.remove(None)\r\n\r\n for var_id in ds.variables:\r\n if var_ids == None or var_id in var_ids:\r\n\r\n # Process required variables\r\n if not fuzzy_contains(var_id, exclude_vars):\r\n if exclude_bounds and var_id in bounds_vars:\r\n continue\r\n\r\n da = ds[var_id]\r\n\r\n # Check whether singleton variable, if so create variable \r\n if hasattr(da, 
\"shape\") and da.shape == ():\r\n # Test type of the data to convert \r\n data_value = get_rank_zero_array_value(da.values) \r\n da = xr.DataArray(np.array(data_value), name=da.name, attrs=da.attrs)\r\n\r\n xr_variables.append(da)\r\n\r\n global_attrs = ds.attrs.items()\r\n return (xr_variables, global_attrs)", "def build_raw_xy_data(params, fold, sub_list):\n # Some repeated code from load_data : not super smart\n X = get_raw_x_data(params, fold, subject_list=sub_list)\n XZ = np.array(X)\n Y = []\n if params[\"data_source\"] == \"ABIDE\":\n classified_file = open(\n \"/scratch/mmahaut/scripts/INT_fMRI_processing/url_preparation/subs_list_asd_classified.json\"\n ) # Hardwriten non-modifiable paths in script is bad practice. modify later !\n classified_dict = json.load(classified_file)\n # no normalisation step (which kind of seems legit for classification)\n for key in classified_dict:\n Y.append(1 if classified_dict[key] == \"asd\" else 0)\n elif params[\"data_source\"] == \"interTVA\":\n # Hardcoding this array is probably not the most reusable solution...\n # Error 1 found on 30/07/2020 : bad correspondance between subject file and hardcoded Y,\n # subjects in subject file were not in the same order\n Y = [\n 81.25,\n 81.25,\n 93.75,\n 93.75,\n 93.75,\n 62.5,\n 81.25,\n 100,\n 100,\n 87.5,\n 87.5,\n 68.75,\n 68.75,\n 87.5,\n 93.75,\n 100,\n 62.5,\n 87.5,\n 93.75,\n 87.5,\n 81.25,\n 81.25,\n 81.25,\n 93.75,\n 50,\n 62.5,\n 93.75,\n 81.25,\n 81.25,\n 87.5,\n 68.75,\n 81.25,\n 87.5,\n 87.5,\n 87.5,\n 75,\n 93.75,\n 93.75,\n 93.75,\n ]\n x = np.array(Y)\n YZ = (x - min(x)) / (max(x) - min(x))\n return XZ, YZ", "def to_xarray(self, **kwargs):\n if not self.fetcher:\n raise InvalidFetcher(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n xds = self.fetcher.to_xarray(**kwargs)\n xds = self.postproccessor(xds)\n return xds", "def hdr_to_Nifti(files):\r\n array = []\r\n for element in files:\r\n array = np.append(array, nib.load(element))\r\n\r\n print('array size: ', array.shape, '\\narray type: ', type(array))\r\n\r\n return array", "def fits_to_nparray(file):\n hdu_list = fits.open(file)\n image_data = hdu_list[0].data\n image_data=image_data.astype(np.uint16)\n \n gdal_array.SaveArray(image_data, file[:-5]+\".tif\")\n \n return image_data", "def createnc(ncfout,xlat,xlon,times=None,zvals=None,wsvals=None,\\\n wdvals=None,olvals=None,attbts=None,ftype=\"timeseries\",dims=[7,180,180]):\n nc_out=nc.Dataset(ncfout,'w',clobber=True)\n\n # Set Attributes to the File\n if attbts is not None:\n final_attbts={}\n # Define projection\n proj_lcc = pj_lcc = Proj(\"+proj=lcc +lat_1={TRUELAT1} +lat_2={TRUELAT2} +lat_0={MOAD_CEN_LAT} +lon_0={STAND_LON} +x_0=0 +y_0=0 +a=6370000 +b=6370000\".format(**attbts))\n\n # Get x&y of domain center\n xcen, ycen = pj_lcc(attbts['CEN_LON'], attbts['CEN_LAT'])\n\n for key in attbts:\n if str(key).find(\"STAG\") <= 0 : # Remove Staggered Grid Information\n final_attbts.update({key:attbts[key]})\n nc_out.setncatts(final_attbts)\n # Create a CRS Variable for the Projection (GIS Readability)\n crsv=nc_out.createVariable('crs','c')\n crsv.semi_major_axis = 6370000.0\n crsv.inverse_flattening = 0.0\n crsv.grid_mapping_name = \"lambert_conformal_conic\"\n crsv.longitude_of_central_meridian = attbts[\"STAND_LON\"]\n crsv.false_easting = 0.0\n crsv.false_northing = 0.0\n crsv.latitude_of_projection_origin = attbts[\"MOAD_CEN_LAT\"]\n crsv.standard_parallel = [attbts[\"TRUELAT1\"],attbts[\"TRUELAT2\"]]\n crsv.longitude_of_prime_meridian = 
0.0\n crsv.proj = proj_lcc.srs\n\n\n\n # Override Institution and Experiment\n nc_out.INSTITUTION=INSTITUTION\n nc_out.EXPERIMENT=EXPERIMENT\n nc_out.Conventions=\"CF-1.6\"\n\n # Create Dimensions First\n if ftype==\"timeseries\":\n nc_out.TITLE='Timeseries of the New European Wind Atlas from WRF V3.8.1'\n nc_out.createDimension('time',None)\n nc_out.createDimension('DateStrLen',19)\n nc_out.createDimension('height',dims[0])\n nc_out.createDimension('south_north',dims[1])\n nc_out.createDimension('west_east',dims[2])\n # Create Time Vector as Integer\n timesn = nc_out.createVariable('time','i8',('time',))\n timesn.units = \"minutes since 1900-01-01 00:00:00.0\"\n timesn.calendar = \"gregorian\"\n timesn.long_name = \"Time\"\n timesn.standard_name = \"time\"\n timesn[:] = nc.date2num(createdatv(times),units=timesn.units,calendar=timesn.calendar)\n # Create additional Time Vector as Character\n timesc = nc_out.createVariable('Times', 'c', ('time','DateStrLen'))\n timesc.format = \"YYYY-MM-DD_HH:MM:SS\"\n timesc.long_name = \"Time\"\n timesc[:] = times[:]\n # Height\n hgts = nc_out.createVariable('height','f4',('height',))\n hgts.units=\"m\"\n hgts.long_name=\"Height above Ground\"\n hgts.standard_name=\"height\"\n hgts[:] = zvals\n # y\n south_north = nc_out.createVariable('south_north','f4',('south_north',))\n south_north.long_name = \"y-coordinate in Cartesian system\"\n south_north.units = \"m\"\n\n dy = attbts[\"DY\"]\n ny = attbts[\"SOUTH-NORTH_PATCH_END_UNSTAG\"]\n ymin = ycen - dy * (ny - 1) / 2\n s_n = np.linspace(0, ny-1, ny) * dy + ymin\n south_north[:] = s_n\n\n # x\n west_east = nc_out.createVariable('west_east','f4',('west_east',))\n west_east.long_name = \"x-coordinate in Cartesian system\"\n west_east.units = \"m\"\n\n dx = attbts[\"DX\"]\n nx = attbts[\"WEST-EAST_PATCH_END_UNSTAG\"]\n xmin = xcen - dx * (nx - 1) / 2\n e_w = np.linspace(0, nx-1, nx) * dx + xmin\n west_east[:] = e_w\n\n elif ftype==\"roughness\":\n nc_out.title='NEWA Roughness'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n\n elif ftype==\"tabfile\":\n nc_out.title='NEWA WasP Tab File'\n nc_out.createDimension('south_north',dims[0])\n nc_out.createDimension('west_east',dims[1])\n nc_out.createDimension('sector',dims[2])\n nc_out.createDimension('wind',dims[3])\n nc_out.createDimension('stab',dims[4])\n\n # Wind Speed Class\n wscl = nc_out.createVariable('wspdCl','f4',('wind',))\n wscl.units=\"ms-1\"\n wscl.long_name=\"Velocity of bin centre\"\n wscl[:] = wsvals\n\n # Wind Speed Class\n wdcl = nc_out.createVariable('wdirCl','f4',('sector',))\n wdcl.units=\"ms-1\"\n wdcl.long_name=\"Velocity of bin centre\"\n wdcl[:] = wdvals\n\n # Stability\n lcl = nc_out.createVariable('Ltypical','f4',('stab',))\n lcl.units=\"m\"\n lcl.long_name=\"L typical\"\n lcl[:] = olvals\n\n # Lat and Lon\n lats = nc_out.createVariable(\"XLAT\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lats[:] = xlat[:]\n lats.units=\"degree_north\"\n lats.long_name=\"Center Latitude of Grid Cell\"\n lats.standard_name=\"latitude\"\n lons = nc_out.createVariable(\"XLON\", 'f4', ('south_north','west_east'), zlib=True,complevel=9)\n lons[:] = xlon[:]\n lons.units=\"degree_east\"\n lons.long_name=\"Center Longitude of Grid Cell\"\n lons.standard_name=\"longitude\"\n nc_out.close()\n return(None)", "def compress_netcfd(folder_path, start_date, out_folder, file_name, num_of_rivids):\n\n # Based on 15 day forecast\n forecast_day_indices = np.array([0, 8, 16, 24, 32, 40, 48, 52, 56, 60, 64, 68, 
72, 76, 80, 84], dtype=np.int8)\n\n # Based on 10 day forecast\n # Excluding the first day because we already have initialization from the normal forecasts\n high_res_forecast_day_indices = np.array([24, 48, 72, 92, 100, 108, 112, 116, 120, 124])\n\n start_datetime = to_datetime(start_date, infer_datetime_format=True)\n dates = date_range(start_datetime + DateOffset(1), periods=15)\n high_res_dates = date_range(start_datetime + DateOffset(1), periods=10)\n\n # Ensemble Dimensions\n # 1) Rivid\n # 2) Number of forecast days (i.e. 15 in a 15 day forecast)\n # 3) Number of ensembles\n\n ensembles = np.zeros((num_of_rivids, 15, 51), dtype=np.float32)\n initialization = np.zeros((num_of_rivids,), dtype=np.float32)\n\n for forecast_number in range(1, 52):\n file = os.path.join(folder_path, \"{}_{}.nc\".format(file_name, forecast_number))\n\n tmp_dataset = xr.open_dataset(file)\n streamflow = tmp_dataset['Qout'].data\n streamflow = streamflow[:, forecast_day_indices]\n\n if forecast_number == 1:\n initialization[:] = streamflow[:, 0]\n rivids = tmp_dataset['rivid'].data\n lat = tmp_dataset['lat'].data\n lon = tmp_dataset['lon'].data\n z = tmp_dataset['z'].data\n\n ensembles[:, :, forecast_number - 1] = streamflow[:, 1:]\n\n tmp_dataset.close()\n\n # High Res Forecast\n file = os.path.join(folder_path, \"{}_52.nc\".format(file_name))\n\n tmp_dataset = xr.open_dataset(file)\n\n high_res_forecast_data = tmp_dataset[\"Qout\"].data\n high_res_forecast_data = high_res_forecast_data[:, high_res_forecast_day_indices]\n\n tmp_dataset.close()\n\n data_variables = {\n \"Qout\": (['rivid', 'date', 'ensemble_number'], ensembles),\n \"Qout_high_res\": (['rivid', 'date_high_res'], high_res_forecast_data)\n }\n\n coords = {\n 'rivid': rivids,\n 'date': dates,\n 'date_high_res': high_res_dates,\n 'ensemble_number': np.arange(1, 52, dtype=np.uint8),\n 'initialization_values': ('rivid', initialization),\n 'lat': ('rivid', lat),\n 'lon': ('rivid', lon),\n 'z': ('rivid', z),\n 'start_date': start_datetime\n }\n\n xarray_dataset = xr.Dataset(data_variables, coords)\n xarray_dataset.to_netcdf(path=os.path.join(out_folder, '{}.nc'.format(start_date)), format='NETCDF4')", "def __repackDataArrays(cH5, format, log):\n SORTED = \"Sorted\"\n\n alnGroups = [x for x in cH5[format.ALN_GROUP_PATH]]\n pulseDatasets = [cH5[x].keys() for x in alnGroups]\n uPulseDatasets = reduce(lambda x,y: set.union(set(x), set(y)), pulseDatasets)\n if (not all(map(lambda x : set(x) == uPulseDatasets, pulseDatasets))):\n log.error(\"All alignment groups need to have the same datasets.\")\n raise Exception(\"Can only repack cmp.h5 files with consistent datasets across alignment groups.\")\n\n readGroupPaths = dict(zip(cH5[format.ALN_GROUP_ID], [ x for x in cH5[format.ALN_GROUP_PATH]]))\n refGroupPaths = dict(zip(cH5[format.REF_GROUP_ID], [ x for x in cH5[format.REF_GROUP_PATH]]))\n uPDAndType = dict(zip(uPulseDatasets, [ cH5[readGroupPaths.values()[0]][z].dtype for z in uPulseDatasets ]))\n\n def getDataset(read, ds):\n return(cH5[readGroupPaths[read[format.ALN_ID]]][ds])\n\n def getRefGroup(gID):\n return(cH5[refGroupPaths[gID]])\n\n offsets = cH5[format.REF_OFFSET_TABLE].value\n sAI = cH5[format.ALN_INDEX]\n orderedRefPaths = [\"\"] * offsets.shape[0]\n\n for row in xrange(0, offsets.shape[0]):\n log.msg(\"Processing reference group: %d of %d\" % (row + 1, offsets.shape[0]))\n orderedRefPaths[row] = \"/\".join([getRefGroup(offsets[row, 0]).name, SORTED])\n\n fRow = int(offsets[row, 1])\n lRow = int(offsets[row, 2])\n \n ## Don't really have to do 
anything if there are no references\n ## which aligned.\n if (lRow == fRow):\n continue \n\n ## Make a new Group.\n newGroup = getRefGroup(offsets[row, 0]).create_group(SORTED)\n log.msg(\"Created new read group: %s\" % SORTED)\n\n ## Go through each read and write it into the new vector.\n reads = sAI[fRow:lRow, ]\n totalSizes = reads[:, format.OFFSET_END] - reads[:, format.OFFSET_BEGIN]\n for pulseDataset in uPulseDatasets: \n log.msg(\"Processing dataset: %s\" % pulseDataset)\n newDS = array([0]*sum(1 + totalSizes), dtype = uPDAndType[pulseDataset])\n currentStart = 0\n for readIdx in xrange(0, reads.shape[0]):\n read = reads[readIdx, ]\n gStart, gEnd = currentStart, currentStart + totalSizes[readIdx]\n newDS[gStart:gEnd] = getDataset(read, pulseDataset)[read[format.OFFSET_BEGIN]:read[format.OFFSET_END]]\n currentStart = gEnd + 1\n newGroup.create_dataset(pulseDataset, data = newDS, dtype = uPDAndType[pulseDataset], maxshape = None)\n \n ## After we've moved all of the data we can move the offsets.\n currentStart = 0\n for i in xrange(0, reads.shape[0]):\n reads[i, format.OFFSET_BEGIN] = currentStart\n reads[i, format.OFFSET_END] = currentStart + totalSizes[i]\n reads[i, format.ALN_ID] = row \n currentStart = reads[i, format.OFFSET_END] + 1\n sAI[fRow:lRow,] = reads\n\n \n ## Now remake the AlnGroup Dataset.\n log.msg(\"Writing new AlnGroupPath values.\")\n del(cH5[format.ALN_GROUP_PATH])\n del(cH5[format.ALN_GROUP_ID])\n cH5.create_dataset(format.ALN_GROUP_PATH, data = orderedRefPaths, \n dtype = H5.new_vlen(str), maxshape = None)\n cH5.create_dataset(format.ALN_GROUP_ID, data = range(0, offsets.shape[0]),\n dtype = \"int32\", maxshape = None)\n for rg in readGroupPaths.values():\n del(cH5[rg])", "def save_correlation_matrix(ldl_corrs, events, output):\n\n corrs_temp = xr.DataArray(data=ldl_corrs, dims=('events1', 'events2'), coords={'events1': events, 'events2': events})\n corrs_temp.to_netcdf(output)\n del corrs_temp", "def to_xarray(self, **kwargs):\n if self._AccessPoint not in self.valid_access_points:\n raise InvalidFetcherAccessPoint(\n \" Initialize an access point (%s) first.\"\n % \",\".join(self.Fetchers.keys())\n )\n return self.load().index.to_xarray(**kwargs)", "def xr_array_to_dataset(data, names, units):\n # Create an empty dataset\n ds_new = xr.Dataset()\n \n # Insert the coordinates\n ds_new.coords[names[0]] = data[:,0]\n ds_new.coords[names[0]].attrs['units'] = units[0]\n \n # Insert the rest of the data\n for i in range(1, len(names)):\n ds_new[names[i]] = ((names[0]), data[:,i])\n ds_new[names[i]].attrs['units'] = units[i]\n \n return ds_new", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3I().pack(_x.x.header.seq, _x.x.header.stamp.secs, _x.x.header.stamp.nsecs))\n _x = self.x.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x.pose.position.x, _x.x.pose.position.y, _x.x.pose.position.z, _x.x.pose.orientation.x, _x.x.pose.orientation.y, _x.x.pose.orientation.z, _x.x.pose.orientation.w, _x.x_desi.header.seq, _x.x_desi.header.stamp.secs, _x.x_desi.header.stamp.nsecs))\n _x 
= self.x_desi.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_7d3I().pack(_x.x_desi.pose.position.x, _x.x_desi.pose.position.y, _x.x_desi.pose.position.z, _x.x_desi.pose.orientation.x, _x.x_desi.pose.orientation.y, _x.x_desi.pose.orientation.z, _x.x_desi.pose.orientation.w, _x.x_desi_filtered.header.seq, _x.x_desi_filtered.header.stamp.secs, _x.x_desi_filtered.header.stamp.nsecs))\n _x = self.x_desi_filtered.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_31d().pack(_x.x_desi_filtered.pose.position.x, _x.x_desi_filtered.pose.position.y, _x.x_desi_filtered.pose.position.z, _x.x_desi_filtered.pose.orientation.x, _x.x_desi_filtered.pose.orientation.y, _x.x_desi_filtered.pose.orientation.z, _x.x_desi_filtered.pose.orientation.w, _x.x_err.linear.x, _x.x_err.linear.y, _x.x_err.linear.z, _x.x_err.angular.x, _x.x_err.angular.y, _x.x_err.angular.z, _x.xd.linear.x, _x.xd.linear.y, _x.xd.linear.z, _x.xd.angular.x, _x.xd.angular.y, _x.xd.angular.z, _x.xd_desi.linear.x, _x.xd_desi.linear.y, _x.xd_desi.linear.z, _x.xd_desi.angular.x, _x.xd_desi.angular.y, _x.xd_desi.angular.z, _x.F.force.x, _x.F.force.y, _x.F.force.z, _x.F.torque.x, _x.F.torque.y, _x.F.torque.z))\n length = len(self.tau_pose)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau_pose.tostring())\n length = len(self.tau_posture)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau_posture.tostring())\n length = len(self.tau)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.tau.tostring())\n length = len(self.J.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.J.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.J.layout.data_offset))\n length = len(self.J.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.J.data.tostring())\n length = len(self.N.layout.dim)\n buff.write(_struct_I.pack(length))\n for val1 in self.N.layout.dim:\n _x = val1.label\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1\n buff.write(_get_struct_2I().pack(_x.size, _x.stride))\n buff.write(_get_struct_I().pack(self.N.layout.data_offset))\n length = len(self.N.data)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(self.N.data.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def array_to_raster(array, x, y):\n\n # Files info\n dst_filename = 'atiff.tiff'\n \n # Load matlab file\n front_dict = loadmat(infile,squeeze_me=True, struct_as_record=False)\n #print front_dict\n \n # You need to get those values like you did.\n x_pixels = len(x) # number of pixels in x\n y_pixels = len(y) # 
number of pixels in y\n PIXEL_SIZE = 1000 # size of the pixel...(in m?) \n x_min = np.min(x)\n y_max = np.min(y) # x_min & y_max are like the \"top left\" corner.\n wkt_projection = 'a projection in wkt that you got from other file'\n\n driver = gdal.GetDriverByName('GTiff')\n\n dataset = driver.Create(\n dst_filename,\n x_pixels,\n y_pixels,\n 1,\n gdal.GDT_Float32, )\n\n dataset.SetGeoTransform((\n x_min, # 0\n PIXEL_SIZE, # 1\n 0, # 2\n y_max, # 3\n 0, # 4\n -PIXEL_SIZE)) \n\n dataset.SetProjection(wkt_projection)\n dataset.GetRasterBand(1).WriteArray(array)\n dataset.FlushCache() # Write to disk.\n return dataset, dataset.GetRasterBand(1) #If you need to ret", "def obj_df(x,project): \n \n dobj_list = project.obj_df(x)\n dobj=[0.0]*len(dobj_list[0])\n \n for this_dobj in dobj_list:\n idv=0\n for this_dv_dobj in this_dobj:\n dobj[idv] = dobj[idv]+this_dv_dobj;\n idv+=1\n dobj = array( dobj )\n \n return dobj", "def reconstruct(self, X):", "def reconstruct(self, X):", "def test_grdfilter_dataarray_in_file_out(grid):\n with GMTTempFile(suffix=\".nc\") as tmpfile:\n result = grdfilter(grid, outgrid=tmpfile.name, filter=\"g600\", distance=\"4\")\n assert result is None # grdfilter returns None if output to a file\n result = grdinfo(tmpfile.name, per_column=True)\n assert (\n result == \"-180 180 -90 90 -6147.49072266 5164.06005859 1 1 360 180 1 1\\n\"\n )", "def transform(x_data):\n return flatten(x_data)", "def to_xarray(trace, coords, dims):\n coords = coords.copy()\n coords['sample'] = list(range(len(trace)))\n coords['chain'] = list(range(trace.nchains))\n\n coords_ = {}\n for key, vals in coords.items():\n coords_[key] = xr.IndexVariable((key,), data=vals)\n coords = coords_\n\n data = xr.Dataset(coords=coords)\n for key in trace.varnames:\n if key.endswith('_'):\n continue\n dims_str = ('chain', 'sample')\n if key in dims:\n dims_str = dims_str + dims[key]\n vals = trace.get_values(key, combine=False, squeeze=False)\n vals = np.array(vals)\n data[key] = xr.DataArray(vals, {v: coords[v] for v in dims_str}, dims=dims_str)\n\n return data", "def pcl_to_ros(pcl_array, frame_id, stamp):\n pcl_array = np.array(pcl_array, dtype=np.float32)\n # print(pcl_array)\n pcl_array = pcl_array.reshape(-1, 4)\n\n ros_msg = PointCloud2()\n\n ros_msg.header.stamp = stamp\n ros_msg.header.frame_id = frame_id\n\n ros_msg.height = 1\n ros_msg.width = pcl_array.size\n\n ros_msg.fields.append(PointField(\n name=\"x\",\n offset=0,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"y\",\n offset=4,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"z\",\n offset=8,\n datatype=PointField.FLOAT32, count=1))\n ros_msg.fields.append(PointField(\n name=\"rgb\",\n offset=16,\n datatype=PointField.FLOAT32, count=1))\n\n ros_msg.is_bigendian = False\n ros_msg.point_step = 32\n ros_msg.row_step = ros_msg.point_step * ros_msg.width\n ros_msg.is_dense = False\n buffer = []\n\n for data in pcl_array:\n # color = [0,0,255,1.0]\n # s = struct.pack('>f', color)\n s = struct.pack('>f', data[3])\n i = struct.unpack('>l', s)[0]\n pack = ctypes.c_uint32(i).value\n\n r = (pack & 0x00FF0000) >> 16\n g = (pack & 0x0000FF00) >> 8\n b = (pack & 0x000000FF)\n\n buffer.append(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))\n # print(len(bytes(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))))\n # buffer = buffer + (struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))\n # 
print(len(bytes(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))))\n # ros_msg.data = bytes(buffer.encode('utf-8'))\n ros_msg.data = int(bytes(buffer))\n # print(str(buffer)[0])\n\n return ros_msg", "def reindex2d(self, index):\n ds_out = xr.Dataset(attrs=self._obj.attrs)\n for var in self.vars:\n ds_out[var] = self._obj[var].raster.reindex2d(index=index)\n return ds_out", "def __init__(self, narray, axes_names=None, axes_domains=None,\n value_label=\"value\", meta_data=None):\n logger.debug('xndarray.__init__ ...')\n\n narray = np.asarray(narray)\n self.data = narray\n self.value_label = value_label\n self.meta_data = meta_data\n self.has_deprecated_xml_header = True\n\n nbDims = self.data.ndim\n\n if axes_names is None:\n self.axes_names = ['dim' + str(i) for i in xrange(nbDims)]\n else:\n assert type(axes_names) == list\n if len(axes_names) != nbDims:\n raise Exception(\"length of axes_names (%d) is different \"\n \"from nb of dimensions (%d).\\n\"\n \"Got axes names: %s\"\n % (len(axes_names), nbDims, str(axes_names)))\n\n self.axes_names = axes_names[:]\n\n self.axes_ids = dict([(self.axes_names[i], i) for i in xrange(nbDims)])\n\n # By default: domain of axis = array of slice indexes\n sh = self.data.shape\n self.axes_domains = dict([(axis, np.arange(sh[i]))\n for i, axis in enumerate(self.axes_names)])\n\n if axes_domains is not None:\n assert isinstance(axes_domains, dict)\n\n for an, dom in axes_domains.iteritems():\n if an not in self.axes_names:\n raise Exception('Axis \"%s\" defined in domains not '\n 'found in axes (%s)'\n % (an, ','.join(self.axes_names)))\n\n ia = self.axes_names.index(an)\n l = self.data.shape[ia]\n if len(dom) != l:\n raise Exception('Length of domain for axis \"%s\" (%d) '\n 'does not match length of data '\n 'axis %d (%d) ' % (an, len(dom), ia, l))\n\n if len(set(dom)) != len(dom):\n raise Exception('Domain of axis \"%s\" does not contain '\n 'unique values' % an)\n\n axes_domains[an] = np.asarray(dom)\n\n self.axes_domains.update(axes_domains)\n\n logger.debug('Axes names: %s', str(self.axes_names))\n logger.debug('Axes domains: %s', str(self.axes_domains))", "def load_from_netcdf(filename):\n filename = os.path.join(datadir, filename + '.nc')\n return xr.open_dataarray(filename)", "def _from_xarray_Dataset(self, ds, ztsp, chem_names): \n # Get the variable names in the Dataset\n keys = []\n for key in ds.data_vars:\n keys += [key]\n \n # Get the correct set of variables chosen by the user\n if 'all' in chem_names:\n # We need to take all variables in the dataset\n non_chems = ['time', 'lat', 'lon'] + ztsp\n chem_names = [name for name in keys if name not in non_chems]\n \n # Make sure the current data are included as the user probably\n # considers them much like the temperature and pressure\n current_vars = ['ua', 'va', 'wa']\n for var in current_vars:\n if var not in chem_names and var in keys:\n chem_names += [var]\n \n # Load the data from the netCDF dataset variables\n ztsp_units, chem_units = get_xarray_data(ds, ztsp, chem_names)\n \n # Return the final set of data\n return (ztsp_units, chem_names, chem_units)", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n 
self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_2H().pack(_x.image_width, _x.image_height))\n length = len(self.Hlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Hlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.Vlines)\n buff.write(_struct_I.pack(length))\n for val1 in self.Vlines:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n buff.write(_get_struct_H().pack(self.PFPS))\n length = len(self.regions)\n buff.write(_struct_I.pack(length))\n for val1 in self.regions:\n _v5 = val1.color\n _x = _v5\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n _v6 = val1.moment\n _x = _v6\n buff.write(_get_struct_10f().pack(_x.m00, _x.m10, _x.m01, _x.m11, _x.m20, _x.m02, _x.m21, _x.m12, _x.m30, _x.m03))\n _x = self\n buff.write(_get_struct_2H().pack(_x.box_width, _x.box_height))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x", "def split_db_original(x, components):\n cm = components[1]\n ap = []\n for itera in cm:\n ap.append(x[:, itera].tolist())\n ap_np = np.transpose(np.array(ap))\n\n return ap_np", "def coords_to_structure(self) -> None:\n ...", "def from_chx(array):\n return _backend._convert_arrays(array, _array_from_chainerx)", "def writeNetCDFData(out_nc, hrus, dr_time, hru_type, remapped_data, var_meta, var_attrs, var_encodings, remap_idx):\n\n dataset = xr.Dataset()\n\n for varname, meta in var_meta.items():\n foo = xr.DataArray(remapped_data[varname][:, remap_idx],\n dims=['time', 'basinID'],\n name=varname)\n\n foo.encoding = var_encodings[varname]\n foo.attrs = var_attrs[varname]\n\n dataset[varname] = foo\n\n # HRU ID variables\n dataset['basinID'] = xr.DataArray(hrus[remap_idx], dims=['basinID'])\n dataset['basinID'].encoding = {'dtype': hru_type, '_FillValue': None}\n dataset['basinID'].attrs = {'long_name': 'Basin ID'}\n\n dataset[TIME_DIM_NAME] = dr_time\n\n dataset.to_netcdf(out_nc, unlimited_dims='time')", "def process_extended_xyz_file_to_array(extended_xyz_file_path, verbose=True):\n\n with open(extended_xyz_file_path, \"r\") as input_file:\n\n # Read all 
the lines at once\n lines = input_file.readlines()\n\n # Get the number of atoms per block, which is always the first line of\n # either an xyz or extended xyz file\n n_atoms = int(lines[0].strip())\n\n # We can print some diagnostics to help us debug\n if verbose:\n print(\n f\"Read {len(lines)} lines from {extended_xyz_file_path}, each \"\n f\"block has {n_atoms} atoms\"\n )\n\n # Each \"single\" xyz file has the following lines:\n # A single line indicating how many atoms there are in the block\n # A comment line\n # n_atoms lines for the species type and coordinates\n # With this information, we can \"chunk\" the list into some number of equal\n # parts each containing 12+2 lines.\n # Check out a way to do this here:\n # https://www.delftstack.com/howto/python/\n # python-split-list-into-chunks/\n # #split-list-in-python-to-chunks-using-the-lambda-function\n EXTRA_LINES = 2 # <- no magic numbers\n offset = n_atoms + EXTRA_LINES\n\n # List comprehension is much faster than for loops. Try to avoid the latter\n # when at all possible\n chunked = [lines[ii:ii + offset] for ii in range(0, len(lines), offset)]\n\n if verbose:\n print(f\"Got {len(chunked)} snapshots\")\n\n # Each entry of chunked contains the:\n # - number of atoms (same for everything)\n # - the energy (I think)\n # - the atom types/coordinates\n # Get the energies\n comment_lines = np.array([\n float(lines[ii + 1]) for ii in range(0, len(lines), offset)\n ])\n\n # Get the atom list - only have to do this once!\n atom_list = [line.split()[0] for line in chunked[0][EXTRA_LINES:]]\n\n # Finally, get the coordinates\n chunked = np.array([\n [line.split()[1:4] for line in chunk[EXTRA_LINES:]]\n for chunk in chunked\n ], dtype=float)\n\n return dict(energy=comment_lines, elements=atom_list, coordinates=chunked)", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def __init__(self, data_array, cui2name):\n\t\tassert isinstance(data_array, xr.DataArray), 'Constructor requires xr.DataArray.'\n\n\t\tself.data_array = data_array\n\t\tself.data = self.data_array.values\n\t\tself.sources = self.data_array.source.values\n\t\tself.targets = self.data_array.target.values\n\t\tself.metrics = self.data_array.metric.values\n\t\tself.metapaths = self.data_array.metapath.values\n\n\t\t# Fetch the verbose names\n\t\t#int_rf_df = pd.read_csv('data/impd_cogn_sources.tsv', delimiter='\\t')\n\t\t#cui2name = {cui:name for cui, name in zip(int_rf_df.identifier, int_rf_df.name)} \n\t\tself.source_names = np.array([cui2name[source] for source in self.sources])", "def make_test_dataArray():\n x = np.zeros(shape=(3,31))\n x[0,:] = np.nan\n x[1,[1,2,3,4,5,6,15,23,24,25]] = [np.nan,np.nan,0.1,0.5,2.,2.,2.,2.,0.9,2.]\n x[2,[3,4,5,6,15,23,24,25]] = [0.1,0.5,2.,2.,2.,2.,0.9,2.]\n da = xr.DataArray(x, dims=['x','time'])\n da.coords['time'] = pd.date_range('19790101', freq='D', periods=31)\n return da", "def data_to_x(self, new_data):\n pass", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x 
= self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.polygons)\n buff.write(_struct_I.pack(length))\n for val1 in self.polygons:\n _v3 = val1.stamp\n _x = _v3\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n buff.write(_struct_I.pack(val1.ID))\n buff.write(val1.parameter.tostring())\n length = len(val1.score)\n buff.write(_struct_I.pack(length))\n for val2 in val1.score:\n _x = val2\n buff.write(_struct_If.pack(_x.ID, _x.prob))\n length = len(val1.polyline)\n buff.write(_struct_I.pack(length))\n for val2 in val1.polyline:\n _x = val2\n buff.write(_struct_3f.pack(_x.x, _x.y, _x.edge_prob))\n length = len(val1.features)\n buff.write(_struct_I.pack(length))\n for val2 in val1.features:\n _x = val2\n buff.write(_struct_3fI.pack(_x.x, _x.y, _x.z, _x.ID))\n _x = val1.energy\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.weight))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def prepare_X_data(ibs, gid_list, use_gps=True):\n images = ibs.images(gid_list, caching=True)\n gps_list_ = images.gps2\n unixtime_list_ = images.unixtime2\n gps_list_ = vt.ensure_shape(gps_list_, (None, 2))\n has_gps = np.all(np.logical_not(np.isnan(gps_list_)), axis=1)\n has_time = np.logical_not(np.isnan(unixtime_list_))\n\n if not use_gps:\n has_gps[:] = False\n\n has_both = np.logical_and(has_time, has_gps)\n has_either = np.logical_or(has_time, has_gps)\n has_gps_only = np.logical_and(has_gps, np.logical_not(has_both))\n has_time_only = np.logical_and(has_time, np.logical_not(has_both))\n has_neither = np.logical_not(has_either)\n\n both = images.compress(has_both)\n xgps = images.compress(has_gps_only)\n xtime = images.compress(has_time_only)\n neither = images.compress(has_neither)\n\n # Group imagse with different attributes separately\n datas = {\n 'both': (both.gids, both.unixtime2, both.gps2),\n 'gps_only': (xgps.gids, None, xgps.gps2),\n 'time_only': (xtime.gids, xtime.unixtime2, None),\n 'neither': (neither.gids, None, None),\n }\n return datas", "def xr_dataset_to_array(ds, z_coord):\n # Determine the size of the dataset\n nvars = len(ds.keys())\n nvals = len(ds.coords[z_coord].values)\n \n # Create an empty array to store the data\n data = np.zeros((nvals, nvars+1))\n units = []\n \n # Insert the depth coordinate\n data[:,0] = ds.coords[z_coord].values\n units.append(ds.coords[z_coord].attrs['units'])\n \n # Insert the rest of the data\n variables = list(ds.keys())\n for i in range(len(variables)):\n data[:,i+1] = ds[variables[i]].values\n units.append(ds[variables[i]].attrs['units'])\n \n # Create a list of variables names\n names = [z_coord] + variables\n \n # Return the data\n return (data, names, units)", "def get_x_data(\n params, dimension, fold, subject_list,\n):\n X = []\n input_file_path = os.path.join(\n params[\"orig_path\"], \"ae_output_{}\".format(params[\"modality\"])\n )\n for i in range(len(subject_list)):\n 
x_sub_data_path = os.path.join(\n input_file_path,\n str(dimension),\n \"fold_{}\".format(fold),\n \"X_{}.npy\".format(subject_list[i]),\n )\n if not os.path.exists(x_sub_data_path):\n x_sub_data = build_x_data(\n dimension, fold, subject_list, i, params, out_file=input_file_path,\n )\n else:\n x_sub_data = np.load(x_sub_data_path)\n X.append(x_sub_data)\n X = np.array(X)\n return X\n # interTVA data has already been run on taskFMRI, on frioul", "def write_compressed(self, filename):\n\n # Define which molecules to use \n # (counting indices of processed data set)\n indices = np.arange(len(self))\n # All charges and position arrays have the same size\n # (the one of the biggest molecule)\n size = np.max( self.num_atoms )\n # Initialize arrays\n num_atoms = np.zeros(len(indices))\n labels = np.zeros(len(indices))\n charges = np.zeros([len(indices),size])\n positions = np.zeros([len(indices),size,3])\n # For each molecule ...\n for j,idx in enumerate(indices):\n # load the data\n sample = self[idx]\n # assign per-molecule data\n labels[j] = sample['data']\n num_atoms[j] = sample['num_atoms']\n # ... and for each atom:\n for ia in range(sample['num_atoms']):\n charges[j,ia] = sample['charges'][ia]\n positions[j,ia,0] = sample['positions'][ia][0] \n positions[j,ia,1] = sample['positions'][ia][1] \n positions[j,ia,2] = sample['positions'][ia][2]\n\n # Merge pairs\n print(labels.shape,charges.shape,positions.shape)\n labels = labels[0::2]\n charges = np.array([np.concatenate((charges[i],charges[i+1])) for i in indices[0::2]])\n positions = np.array([np.concatenate((positions[i],positions[i+1])) for i in indices[0::2]])\n print(labels.shape,charges.shape,positions.shape)\n \n # Create a dictionary with all the values to save\n save_dict = {}\n save_dict['label'] = labels\n save_dict['charges'] = charges\n save_dict['positions'] = positions\n\n # Save as a compressed array \n np.savez_compressed(filename,**save_dict)\n \n return", "def transform(self, x: Array2D) -> Array2D:", "def save_ncds(vardict,coords,attrs={},filename=None):\n \n if 'time' in coords.keys():\n newtime = [np.double((t-dt(1800,1,1)).total_seconds()/3600) for t in coords['time']['data']]\n delta_t = np.gradient(newtime)[0]\n \n coords['time']['data'] = newtime\n coords['time']['attrs'] = {'long_name':\"Time\",\n 'delta_t':f\"0000-00-{int(delta_t/24):02} {int(delta_t%24):02}:00:00\",\n 'standard_name':\"time\",\n 'axis': \"T\",\n 'units':\"hours since 1800-01-01 00:00:0.0\"}\n \n if 'climo' in vardict.keys():\n long_name = vardict['climo']['attrs']['long_name']\n vardict['climo']['attrs']['long_name'] = 'Climatology of '+long_name\n \n encoding = {k: {'dtype': 'double', '_FillValue': 1e30} for k in coords.keys()}\n for k in vardict.keys():\n encoding.update({k: {'dtype': 'single', '_FillValue': 1e30}})\n \n ds = xr.Dataset.from_dict({\n 'coords':coords,\n 'data_vars':vardict,\n 'dims':[k for k,v in coords.items()],\n 'attrs':attrs,\n })\n \n if isinstance(filename,str):\n ds.to_netcdf(filename,encoding=encoding,mode='w',engine='scipy')\n ds.close()\n else:\n print('filename must be a string')", "def GPy_reformat_3D(array):\r\n n_timesteps = np.shape(array)[-1]\r\n if len(np.shape(array)) == 1:\r\n array = array.reshape(n_timesteps, 1)\r\n return [array, array, array]\r\n elif len(np.shape(array)) == 2:\r\n array = array.T\r\n array1 = array[:, 0, None]\r\n array2 = array[:, 1, None]\r\n array3 = array[:, 2, None]\r\n return [array1, array2, array3]\r\n else:\r\n return print(\"Error in GPy_reformat, input array is wrong 
shape.\")", "def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])", "def xarray_to_df(file_path):\n df = xr.open_dataset(file_path).to_dataframe()\n df = df.reset_index()\n return df", "def canonicalize(data):\n data = data.transpose(*[d for d in map(data.axes.find, 'TCIZYX') if d >= 0])\n projection = []\n\n if 'T' in data.axes and data.shape[0] == 1:\n projection.append(0) # remove trivial T dimension\n\n if 'C' not in data.axes:\n projection.append(None) # add trivial C dimension\n elif projection:\n projection.append(slice(None))\n\n if projection:\n projection += [slice(None) for d in 'ZYX']\n data = data.lazyget(tuple(projection))\n \n return data", "def serialize_numpy(self, buff, numpy):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.type_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_4f.pack(_x.home.latitude, _x.home.longitude, _x.home.altitude, _x.home.heading))\n length = len(self.movements)\n buff.write(_struct_I.pack(length))\n for val1 in self.movements:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val1.type))\n length = len(val1.pre_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.pre_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n length = len(val1.post_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.post_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = 
len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n buff.write(_struct_f.pack(val1.altitude))\n _v5 = val1.target_position\n _x = _v5\n buff.write(_struct_4f.pack(_x.latitude, _x.longitude, _x.altitude, _x.heading))\n buff.write(_struct_b.pack(val1.strategy))\n _v6 = val1.duration\n _x = _v6\n buff.write(_struct_2i.pack(_x.secs, _x.nsecs))\n _x = val1\n buff.write(_struct_2fBf.pack(_x.radius, _x.circle_altitude, _x.clockwise, _x.direction))\n length = len(self.move_transitions)\n buff.write(_struct_I.pack(length))\n for val1 in self.move_transitions:\n buff.write(_struct_B.pack(val1.is_choice))\n _x = val1.wait_for_slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.from_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.to_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.fluid))\n _x = val1.condition_identifier\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.false_branch_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.slot_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.slot_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n buff.write(_struct_b.pack(self.travel_mode))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def to_dataset(self):\n import xarray as xr\n ds = xr.Dataset(coords={'x': (['x', ], self.center_grid.x_coord),\n 'y': (['y', ], self.center_grid.y_coord)}\n )\n ds.attrs['pyproj_srs'] = self.proj.srs\n return ds", "def export_array(in_array, output_path):\n global proj, geotrans, row, col\n proj = band.GetProjection()\n geotrans = band.GetGeoTransform()\n row = band.RasterYSize\n col = band.RasterXSize\n driver = gdal.GetDriverByName(\"GTiff\")\n outdata = driver.Create(output_path, col, row, 1)\n outband = outdata.GetRasterBand(1)\n outband.SetNoDataValue(-9999)\n outband.WriteArray(in_array)\n # Georeference the image\n outdata.SetGeoTransform(geotrans)\n # Write projection information\n outdata.SetProjection(proj)\n outdata.FlushCache()\n outdata = None", "def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = 
datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']", "def _reindex2d(self, index, dst_nodata=np.nan):\n # create new DataArray for output\n dst_coords = {d: self._obj.coords[d] for d in self._obj.dims}\n ys, xs = index.raster.ycoords, index.raster.xcoords\n dst_coords.update({self.y_dim: ys, self.x_dim: xs})\n da_reproject = full(\n dst_coords,\n nodata=dst_nodata,\n dtype=self._obj.dtype,\n name=self._obj.name,\n attrs=self._obj.attrs,\n crs=index.raster.crs,\n shape=index.raster.shape\n if self.dim0 is None\n else (self._obj.shape[0], *index.raster.shape),\n dims=self.dims if self.dim0 is None else (self.dim0, *self.dims),\n )\n # reproject by indexing\n shape2d = (self._obj.shape[0] if self.dim0 else 1, self.size)\n src_data = self._obj.load().data.reshape(shape2d)\n idxs = index.values\n valid = idxs >= 0\n if self.dim0:\n da_reproject.data[:, valid] = src_data[:, idxs[valid]]\n else:\n da_reproject.data[valid] = src_data[:, idxs[valid]].squeeze()\n return da_reproject", "def test_netCDF_field_components(self):\n # Geometries\n f = cfdm.example_field(6)\n\n for component in (\"interior_ring\", \"node_count\", \"part_node_count\"):\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n f.nc_clear_component_variable_groups(component)\n f.nc_del_component_variable(component)\n\n f.nc_del_component_variable(component)\n f.nc_clear_component_variable_groups(component)\n\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n for component in (\"interior_ring\", \"part_node_count\"):\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n f.nc_clear_component_dimension_groups(component)\n f.nc_del_component_dimension(component)\n\n f.nc_del_component_dimension(component)\n f.nc_clear_component_dimension_groups(component)\n\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n # Compression: indexed and contiguous\n f = cfdm.example_field(4)\n f.compress(\"indexed_contiguous\", 
inplace=True)\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n f.nc_clear_component_variable_groups(component)\n f.nc_del_component_variable(component)\n\n f.nc_del_component_variable(component)\n f.nc_clear_component_variable_groups(component)\n\n f.nc_set_component_variable(component, \"ncvar\")\n f.nc_set_component_variable_groups(component, [\"forecast\"])\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n f.nc_clear_component_dimension_groups(component)\n f.nc_del_component_dimension(component)\n\n f.nc_del_component_dimension(component)\n f.nc_clear_component_dimension_groups(component)\n\n f.nc_set_component_dimension(component, \"ncvar\")\n f.nc_set_component_dimension_groups(component, [\"forecast\"])\n\n for component in (\"count\", \"index\"):\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n f.nc_set_component_sample_dimension_groups(component, [\"forecast\"])\n\n f.nc_clear_component_sample_dimension_groups(component)\n f.nc_del_component_sample_dimension(component)\n\n f.nc_del_component_sample_dimension(component)\n f.nc_clear_component_sample_dimension_groups(component)\n\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n f.nc_set_component_sample_dimension_groups(component, [\"forecast\"])\n\n # Compression: gathered\n component = \"list\"\n\n # Expected exceptions\n for component in (\"list\", \"node_count\"):\n with self.assertRaises(ValueError):\n f.nc_set_component_dimension(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_dimension(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_dimension_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_dimension_groups(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_sample_dimension(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_sample_dimension(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_sample_dimension_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_sample_dimension_groups(component)\n\n # Expected exceptions\n for component in (\"WRONG\",):\n with self.assertRaises(ValueError):\n f.nc_set_component_variable(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_del_component_variable(component)\n\n with self.assertRaises(ValueError):\n f.nc_set_component_variable_groups(component, \"ncvar\")\n\n with self.assertRaises(ValueError):\n f.nc_clear_component_variable_groups(component)", "def read_netcdf(self,filename):", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x", "def reformat(x):\n x = x.permute(0, 2, 3, 1)\n N, D1, D2, Feat = x.size()\n x = x.view(N, D1 * D2, Feat)\n return x" ]
[ "0.57716465", "0.5675053", "0.5598693", "0.5529777", "0.5516983", "0.54658604", "0.54497313", "0.54397637", "0.53918636", "0.5390577", "0.5377511", "0.5367062", "0.5363264", "0.5362354", "0.53438973", "0.5343177", "0.5296879", "0.5289519", "0.5279383", "0.5264651", "0.5245457", "0.519476", "0.5188231", "0.5187344", "0.516218", "0.51404256", "0.5105498", "0.5096492", "0.50843686", "0.50721246", "0.50531244", "0.50504994", "0.503895", "0.5036031", "0.50353795", "0.50307286", "0.5027354", "0.50060767", "0.49995577", "0.49982584", "0.49945986", "0.49909303", "0.49906126", "0.49836388", "0.4979259", "0.49782115", "0.49749655", "0.49742958", "0.49669603", "0.49637422", "0.49558052", "0.49518254", "0.49512747", "0.49506173", "0.4938212", "0.49194098", "0.4908029", "0.49068618", "0.49026388", "0.49025506", "0.49025506", "0.4902169", "0.48991933", "0.4893749", "0.48906216", "0.48900867", "0.4887525", "0.48725298", "0.48708686", "0.48699367", "0.48684907", "0.48550537", "0.48543912", "0.4853672", "0.48485482", "0.4848158", "0.48472407", "0.4845182", "0.48343152", "0.48328435", "0.4830019", "0.48219594", "0.48212764", "0.48195565", "0.48169157", "0.4811301", "0.48063076", "0.48019403", "0.47994474", "0.47989315", "0.47966328", "0.4794573", "0.47937787", "0.47911105", "0.4789666", "0.47787568", "0.4778118", "0.47771564", "0.47760984", "0.477604", "0.477604" ]
0.0
-1
Takes a word; if it is an abbreviation, it returns the meaning
def get_abbr(self, word):
    assert (self.collection is not None)
    for conv in self.collection:
        ln = conv.split("*")
        if word == ln[0]:
            return ln[1]
    return None
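Editor's illustration (not part of the dataset record): a minimal, hypothetical usage sketch of the get_abbr lookup above. It assumes self.collection holds strings of the form "abbr*meaning", which is what the split("*") implies; the AbbrDict wrapper class and the sample entries are invented here only so the sketch is self-contained and runnable.

class AbbrDict:
    def __init__(self, collection):
        # Assumption: each entry looks like "abbr*meaning".
        self.collection = collection

    def get_abbr(self, word):
        assert (self.collection is not None)
        for conv in self.collection:
            ln = conv.split("*")
            if word == ln[0]:
                return ln[1]
        return None

# Hypothetical usage:
d = AbbrDict(["btw*by the way", "imo*in my opinion"])
print(d.get_abbr("btw"))    # -> "by the way"
print(d.get_abbr("hello"))  # -> None (not a known abbreviation)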
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_abbrev(word):\r\n return abbreviations[word.lower()] if word.lower() in abbreviations.keys() else word", "def aux_lemma(word):\n if re.match(r\"(does|did|doing)\", word):\n return (\"do\")\n elif re.match(r\"(had|has|'ve|having)\", word):\n return (\"have\")\n elif re.match(r\"(is|are|am|was|were|been|'s|being)\", word):\n return (\"be\")\n elif word == (\"'d\"):\n return (\"would\")\n else:\n return word.lower()", "def get_casing(word):\n if len(word) == 0:\n return \"other\"\n elif word.isdigit(): # Is a digit\n return \"numeric\"\n elif word.islower(): # All lower case\n return \"allLower\"\n elif word.isupper(): # All upper case\n return \"allUpper\"\n # is a title, initial char upper, then all lower\n elif word[0].isupper():\n return \"initialUpper\"\n\n return \"other\"", "def disambiguate(self, word):\n matches = re.match(r'^pen([cdjz])(.*)$', word)\n if matches:\n return matches.group(1) + matches.group(2)", "def test_police_abbreviations(self):\n for word in self.report.get_words():\n for uword in self.rules.police_abbreviations:\n if uword[\"word\"] == word.text.lower():\n self.add_error(\n f\"{word.text} är en intern förkortning. \"\n f\"Använd {uword['means']} istället.\",\n word=word,\n )", "def get_american_term_definition(word: str) -> str:\n word = process_word(word)\n return AMERICAN_ENGLISH_ONLY_TERMS[word]", "def det_lemma(word):\n if word == (\"an\"):\n return (\"a\")\n else:\n return word.lower()", "def main() -> None:\n word: str = input(\"Write some text with some uppercase letters: \")\n abbreviation_out: str = abbreviate(word)\n print(f\"The abbreviation is \\\"{abbreviation_out}\\\".\")\n return None", "def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence", "def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return 
word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()", "def is_abbr(word):\r\n\twith open(\"ABBR_DICT.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\twordlist = [line.split(\"\\n\")[0] for line in f]\r\n\tif word.upper() in wordlist:\r\n\t\treturn 1\r\n\treturn 0", "def find_abecedarian_words():\n pass", "def is_abecedarian(word):\n pass", "def part_lemma(word):\n if word == (\"n't\"):\n return (\"not\")\n else:\n return word.lower()", "def verb_stem(s):\n \n #If the stem is have, its 3s form is has.\n if s == \"has\" :\n return \"have\"\n\n #If the stem ends in y preceded by a vowel, simply add s (pays, buys).\n elif re.match(r\"[A-z]+[aeiou][y]s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in y preceded by a non-vowel and contains at least three letters, change the y to ies (flies, tries, unifies).\n elif re.match(r\"[A-z]+[^aeiou]ies\\b\", s):\n str = s[:-3] + 'y'\n\n #If the stem is of the form Xie where X is a single letter other than a vowel, simply add s (dies, lies, ties note that this doesnt account for unties).\n elif re.match(r\"[^aeiou]ies\\b\", s):\n str = s[:-1]\n\n #If the stem ends in o,x,ch,sh,ss or zz, add es (goes, boxes, attaches, washes, dresses, fizzes).\n elif re.match(r\"[A-z]+([ox]|[cs]h|[s]s|[z]z)es\\b\", s): \n str = s[:-2]\n\n #If the stem ends in se or ze but not in sse or zze, add s (loses, dazes, lapses, analyses).\n elif re.match(r\"[A-z]+([s][^s][e]|[z][^z][e])s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in e not preceded by i,o,s,x,z,ch,sh, just add s (likes, hates, bathes).\n elif re.match(r\"[A-z]+([^iosxz]|[^ch]|[^sh])es\\b\", s):\n str = s[:-1]\n \n #If the stem ends in anything except s,x,y,z,ch,sh or a vowel, add s (eats, tells, shows)\n elif re.match(r\"[A-z]+([^sxyzaeiou]|[^cs]h)s\\b\", s):\n str = s[:-1]\n\n else: \n str = \"\"\n\n\n matches = [(w, t) for (w, t) in vb_list if (w == s or w == str)]\n\n tag_s = [(w, t) for (w, t) in matches if w == s and t == 'VBZ']\n\n if tag_s == True:\n return str\n else:\n tag_str = [t for (w, t) in matches if w == str and t == 'VB']\n\n if not (tag_s or tag_str):\n str = \"\"\n\n return str", "def choose_word():\n pass", "def acronym(name):\n return tuple(map(first, filter(capitalized, name.split())))", "def adj_lemma(word):\n if word.endswith(\"er\"):\n return word[:-2].lower()\n elif word != (\"best\") and word.endswith(\"est\"):\n return word[:-3].lower()\n else:\n return word.lower()", "def process_word(self, word: str) -> list[str]:\n d = self.d\n if not d:\n return None\n if d.check(word):\n return None\n # Speed doesn't matter here. 
The more we find, the more convenient.\n # Remove all digits.\n word = ''.join([i for i in word if not i.isdigit()])\n if d.check(word) or d.check(word.lower()):\n return None\n if word.find('_') > -1:\n # Snake case.\n words = word.split('_')\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n words = g.unCamel(word)\n if words:\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n return d.suggest(word)", "def process_word(self, word: str) -> list[str]:\n d = self.d\n if not d:\n return None\n if d.check(word):\n return None\n # Speed doesn't matter here. The more we find, the more convenient.\n # Remove all digits.\n word = ''.join([i for i in word if not i.isdigit()])\n if d.check(word) or d.check(word.lower()):\n return None\n if word.find('_') > -1:\n # Snake case.\n words = word.split('_')\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n words = g.unCamel(word)\n if words:\n for word2 in words:\n if not d.check(word2) and not d.check(word2.lower()):\n return d.suggest(word)\n return None\n return d.suggest(word)", "def __isNoun__(self, word):\n self.nouns = ('door', 'bear', 'princess', 'cabinet')\n for noun in self.nouns:\n if noun == word:\n return ('noun', word), True\n return None, False", "def get_british_term_definition(word: str) -> str:\n word = process_word(word)\n return BRITISH_ENGLISH_ONLY_TERMS[word]", "def word_of_the_day():\n r = requests.get(\"http://www.urbandictionary.com\") # link is always homepage\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\" # header is the word we are defining\n # def_header = def_header[0:len(def_header) - 10] # header always ends in \"unknown\" this removes it\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n # formatting TODO move to controller\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". 
\")\n for x in [\"v.\", \"n.\"]:\n meaning = meaning.replace(x, x.upper()[:-1])\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n output = def_header + \": \" + \"```\" + meaning + \"\\nEx: \" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example", "def getDisambiguatedByNextVerb(self, word):\n\t\treturn disambig_const.DISAMBIGUATATION_TABLE.get(word, {}).get('verb', {}).get('vocalized', word);", "def indefinite(self):\n return \"an\" if self.short_desc[0] in 'aeiou' else \"a\"", "def med_in_english(word):\r\n\treturn int(med(TextBlob(word).correct(), word))", "def noun_lemma(word):\n if word.endswith(\"s\"):\n if word.endswith(\"ss\"):\n return word.lower()\n elif word.endswith(\"ies\"):\n return word[:-3].lower() + (\"y\")\n else:\n return word[:-1].lower()\n if word.endswith(\"men\"):\n return word[:-2].lower() + (\"an\")\n else:\n return word.lower()", "def a_or_an(s):\n if s[0].lower() in 'aeiou':\n return 'an'\n return 'a'", "def get_word(self) -> str: \n #return str(choice(word_list).upper())\n return \"ANONYMOUS\"", "def good_word(self, word):\r\n return word.strip().lower()", "def format_meaning(phrase, data):\n if 'tuc' not in data:\n return '%s: no meaning found' % phrase\n\n for t in data.get('tuc'):\n if 'meanings' in t:\n message = '; '.join([_clean_text(x['text'])\n for x in t['meanings']])\n return '%s: %s' % (phrase, message)\n\n # we didn't find a more detailed description, so check if there\n # is a simpler one so we don't return empty handed.\n for t in data.get('tuc'):\n if 'phrase' in t and 'text' in t.get('phrase', {}):\n return '%s: %s' % (phrase, _clean_text(t['phrase']['text']))\n\n return '%s: no meaning found' % phrase", "def lookup_pronunciations_for_word(word: Text) -> Sequence[Word]:\n return EnglishUtils.all_possible_forms_for(word)", "def pluralized(word):\n defined_plurals = {\n \"person\": \"people\"\n }\n if word in defined_plurals:\n return defined_plurals[word]\n\n es_endings = [\"s\", \"sh\", \"ch\", \"x\", \"z\"]\n if any([word.endswith(ending) for ending in es_endings]):\n return f\"{word}es\"\n else:\n return f\"{word}s\"", "def noun_stem (s):\n\n \"\"\"codes from statements.py (PART A)\"\"\"\n def match (p):\n return re.match(p + '$', s, re.IGNORECASE)\n\n if (s in unchanging_plurals_list):\n return s\n elif (s[-3:] == 'men'):\n return s[0:-3] + 'man'\n elif match('.*(?<!.[aeiousxyz]|sh|ch)s'):\n return s[:-1]\n elif match('.*[aeiou]ys'):\n return s[:-1]\n elif match('.*.[^aeiou]ies'):\n return s[:-3] + 'y'\n elif match('[^aeiou]ies'):\n return s[:-1]\n elif match('.*(o|x|ch|ss|zz|sh)es'):\n return s[:-2]\n elif match('.*([^s]se|[^z]ze)s'):\n return s[:-1]\n elif match('.*(?<!.[iosxz]|sh|ch)es'):\n return s[:-1]\n else:\n return ''", "def fry(word):\n\n # looks for a Y or y which will be (captured) followed and ended by an 'ou'\n match_you = re.match('([Yy])ou$', word)\n\n # First group will be the (captured) group so either 'Y' or 'y'\n if match_you:\n return match_you.group(1) + \"'all\"\n\n # looks for anyword ending in 'ing'\n match_ing = re.search('(.+)ing$', word)\n\n # checks if vowel exists before the 'ing'\n if match_ing:\n vowel_check = re.search('[aeiouy]', match_ing.group(1))\n # First group will be the (captured) group so everything before the 'ing'\n if vowel_check:\n return match_ing.group(1) + \"in'\"\n\n return word", "def abbreviation(self):\n return 
self._abbreviation", "def stem(self, word):\n word = word.lower()\n\n if word in self.__special_words:\n return self.__special_words[word]\n\n # Map the different apostrophe characters to a single consistent one\n word = (word.replace(u(\"\\u2019\"), u(\"\\x27\"))\n .replace(u(\"\\u2018\"), u(\"\\x27\"))\n .replace(u(\"\\u201B\"), u(\"\\x27\")))\n\n if word.startswith(u(\"\\x27\")):\n word = word[1:]\n\n if word.startswith(\"y\"):\n word = \"\".join((\"Y\", word[1:]))\n\n for i in range(1, len(word)):\n if word[i - 1] in self.__vowels and word[i] == \"y\":\n word = \"\".join((word[:i], \"Y\", word[i + 1:]))\n\n step1a_vowel_found = False\n step1b_vowel_found = False\n\n r1 = \"\"\n r2 = \"\"\n\n if word.startswith((\"gener\", \"commun\", \"arsen\")):\n if word.startswith((\"gener\", \"arsen\")):\n r1 = word[5:]\n else:\n r1 = word[6:]\n\n for i in range(1, len(r1)):\n if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels:\n r2 = r1[i + 1:]\n break\n else:\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n\n # STEP 0\n for suffix in self.__step0_suffixes:\n if word.endswith(suffix):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 1a\n for suffix in self.__step1a_suffixes:\n if word.endswith(suffix):\n\n if suffix == \"sses\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"ied\", \"ies\"):\n if len(word[:-len(suffix)]) > 1:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n else:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix == \"s\":\n for letter in word[:-2]:\n if letter in self.__vowels:\n step1a_vowel_found = True\n break\n\n if step1a_vowel_found:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n break\n\n # STEP 1b\n for suffix in self.__step1b_suffixes:\n if word.endswith(suffix):\n if suffix in (\"eed\", \"eedly\"):\n\n if r1.endswith(suffix):\n word = \"\".join((word[:-len(suffix)], \"ee\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ee\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ee\"))\n else:\n r2 = \"\"\n else:\n for letter in word[:-len(suffix)]:\n if letter in self.__vowels:\n step1b_vowel_found = True\n break\n\n if step1b_vowel_found:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n if word.endswith((\"at\", \"bl\", \"iz\")):\n word = \"\".join((word, \"e\"))\n r1 = \"\".join((r1, \"e\"))\n\n if len(word) > 5 or len(r1) >= 3:\n r2 = \"\".join((r2, \"e\"))\n\n elif word.endswith(self.__double_consonants):\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif ((r1 == \"\" and len(word) >= 3 and\n word[-1] not in self.__vowels and\n word[-1] not in \"wxY\" and\n word[-2] in self.__vowels and\n word[-3] not in self.__vowels)\n or\n (r1 == \"\" and len(word) == 2 and\n word[0] in self.__vowels and\n word[1] not in self.__vowels)):\n\n word = \"\".join((word, \"e\"))\n\n if len(r1) > 0:\n r1 = \"\".join((r1, \"e\"))\n\n if len(r2) > 0:\n r2 = \"\".join((r2, \"e\"))\n break\n\n # STEP 1c\n if (len(word) > 2\n and word[-1] in \"yY\"\n and word[-2] not in self.__vowels):\n word = \"\".join((word[:-1], \"i\"))\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"i\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"i\"))\n else:\n r2 = \"\"\n\n # STEP 2\n for suffix in self.__step2_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"enci\", 
\"anci\", \"abli\"):\n word = \"\".join((word[:-1], \"e\"))\n\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"e\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"e\"))\n else:\n r2 = \"\"\n\n elif suffix == \"entli\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"izer\", \"ization\"):\n word = \"\".join((word[:-len(suffix)], \"ize\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ize\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ize\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ational\", \"ation\", \"ator\"):\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"alism\", \"aliti\", \"alli\"):\n word = \"\".join((word[:-len(suffix)], \"al\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"al\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"al\"))\n else:\n r2 = \"\"\n\n elif suffix == \"fulness\":\n word = word[:-4]\n r1 = r1[:-4]\n r2 = r2[:-4]\n\n elif suffix in (\"ousli\", \"ousness\"):\n word = \"\".join((word[:-len(suffix)], \"ous\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ous\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ous\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"iveness\", \"iviti\"):\n word = \"\".join((word[:-len(suffix)], \"ive\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ive\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ive\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"biliti\", \"bli\"):\n word = \"\".join((word[:-len(suffix)], \"ble\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ble\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ble\"))\n else:\n r2 = \"\"\n\n elif suffix == \"ogi\" and word[-4] == \"l\":\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix in (\"fulli\", \"lessli\"):\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"li\" and word[-3] in self.__li_ending:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n break\n\n # STEP 3\n for suffix in self.__step3_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"ational\":\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"\"\n\n elif suffix == \"alize\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n\n elif suffix in (\"icate\", \"iciti\", \"ical\"):\n word = \"\".join((word[:-len(suffix)], \"ic\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ic\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ic\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ful\", \"ness\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n elif suffix == \"ative\" and r2.endswith(suffix):\n word = word[:-5]\n r1 = r1[:-5]\n r2 = r2[:-5]\n break\n\n # STEP 4\n for suffix in 
self.__step4_suffixes:\n if word.endswith(suffix):\n if r2.endswith(suffix):\n if suffix == \"ion\":\n if word[-4] in \"st\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n else:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 5\n if r2.endswith(\"l\") and word[-2] == \"l\":\n word = word[:-1]\n elif r2.endswith(\"e\"):\n word = word[:-1]\n elif r1.endswith(\"e\"):\n if len(word) >= 4 and (word[-2] in self.__vowels or\n word[-2] in \"wxY\" or\n word[-3] not in self.__vowels or\n word[-4] in self.__vowels):\n word = word[:-1]\n\n word = word.replace(\"Y\", \"y\")\n return word", "def meaning_of(word, app_id, app_key):\n \n url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word.lower()\n r = requests.get(url, headers={\"app_id\":app_id, \"app_key\":app_key})\n\n data = r.json()\n useful_data = {}\n\n for i in data['results'][0]['lexicalEntries'][0]['entries']:\n for j in i:\n for k in i[j][0]:\n try:\n subdata = i[j][0][k]\n if k == 'subsenses':\n useful_data.update({\"meanings\":subdata[0]['definitions']})\n elif k == 'examples':\n useful_data.update({\"examples\":subdata[0]['text']})\n else:\n pass\n except:\n pass\n return useful_data", "def main():\n word = input(\"Give me a word! \\n\\n\")\n vowels = ['a', 'e', 'i', 'o', 'u']\n if word[0].lower() in vowels:\n print(f\"\\n\\nPig latin: {word}way\")\n else:\n print(f\"\\n\\nPig latin: {word[1:]}{word[0]}ay\")", "def acronym_gen(name):\n return tuple(w[0] for w in name.split() if capitalized(w))", "def findPOS(word):\r\n\t\r\n lisPOS = list(wordtags[word])\r\n if \"ADJ\" in lisPOS:\r\n return \"ADJECTIVE\"\r\n if \"ADV\" in lisPOS:\r\n return \"ADVERB\"\r\n if \"NOUN\" in lisPOS:\r\n return \"NOUN\"", "def getCasing(word):\n casing = 'other'\n \n numDigits = 0\n for char in word:\n if char.isdigit():\n numDigits += 1\n \n digitFraction = numDigits / float(len(word))\n \n if word.isdigit(): #Is a digit\n casing = 'numeric'\n elif digitFraction > 0.5:\n casing = 'mainly_numeric'\n elif word.islower(): #All lower case\n casing = 'allLower'\n elif word.isupper(): #All upper case\n casing = 'allUpper'\n elif word[0].isupper(): #is a title, initial char upper, then all lower\n casing = 'initialUpper'\n elif numDigits > 0:\n casing = 'contains_digit'\n \n return casing", "def my_spell(word):\r\n from autocorrect import spell\r\n \r\n corrected_word = ''\r\n rescued_typo = 0\r\n \r\n if len(word)>1: # one letter word are not considered \r\n \r\n # try to correct typo\r\n if is_typo(word): \r\n print('typo: ' + word)\r\n word = spell(word)\r\n print('autocorrected typo: ' + word)\r\n\r\n if not is_typo(word): \r\n rescued_typo = 1\r\n corrected_word = word\r\n else:\r\n corrected_word = word\r\n\r\n return corrected_word, rescued_typo", "def _match_word_vocab(word, vocab):\n if word not in vocab:\n if word.lower() in vocab:\n return word.lower()\n elif word.upper() in vocab:\n return word.upper()\n elif word.capitalize() in vocab:\n return word.capitalize()\n return word", "def estimate(word):\n parts = re.split(r'[^aeiouy]+', word)\n valid_parts = []\n\n for part in parts:\n if part != '':\n valid_parts.append(part)\n\n syllables = 0\n\n for p in re_subsyllables:\n if p.match(word):\n syllables -= 1\n\n for p in re_addsyllables:\n if p.match(word):\n syllables += 1\n\n syllables += len(valid_parts)\n\n if syllables <= 0:\n syllables = 1\n\n return syllables", "def getDisambiguatedByNextNoun(self, word):\n\t\treturn 
disambig_const.DISAMBIGUATATION_TABLE.get(word, {}).get('noun', {}).get('vocalized', word);", "def getWord(wordType):\n if (wordType == ADJECTIVE) or (wordType == ADJECTIVE):\n newWord = input('Enter an ' + wordType.lower() + \":\\n\")\n return newWord\n else:\n newWord = input('Enter a ' + wordType.lower() + \":\\n\")\n return newWord", "def stem(s):\n special = {'appall', 'kill', 'stroll', 'kiss', 'thrill', 'chugg', 'dress', 'err', 'express', 'fall', 'free', 'gall', 'add','cross', 'impress', 'inn', 'call', 'ball', 'bill', 'buzz'} \n ie_words = {'vying', 'lying', 'dying', 'tying'}\n short_ing = {'bring','sling','sping', 'bring', 'sing', 'ring', 'king', 'cling' ,'fling', 'wing', 'ding', 'ping', 'ting'}\n c_k_words = {'kick', 'muck', 'lock','pick', 'back', 'mock', 'peck', 'lock', 'nick'}\n\n if len(s) <= 3:\n return s\n if s[-3:] == 'ing' or s[-4:] == 'ings': \n if s in short_ing:\n return s\n elif s in special:\n return s[:-3]\n elif s[:-3] not in special and s[-4] == s[-5]:\n return s[:-4]\n elif s[:-3] not in c_k_words and s[-4] == 'k':\n return s[:-4]\n elif s == 'everything' or s == 'anything' or s == 'something':\n return s[:-5]\n elif s in ie_words:\n return s[0] + 'ie'\n else:\n return s[:-3]\n elif s[-3:] == 'ers':\n return s[:-3]\n elif s[-2:] == 'es':\n return s[:-2]\n elif s[-2:] == 'en':\n return s[:-2]\n elif s[-2:] == 'er':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2] \n elif s[-2:] == 'ed':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2]\n elif s[-3:] == 'ies':\n return s[:-2]\n elif s[-1:] == 's':\n return s[:-1]\n elif s[-1:] == 'e' and s not in ie_words:\n return s[:-1]\n elif s[-3:] == 'ful':\n return s[:-3]\n elif s[:2] == 'de':\n return s[2:]\n elif len(s) > 4 and s[-4:] == 'able' or s[-4] == 'ible':\n return s[:-4]\n elif s[:2] == 'in' or s[:2] == 'il' or s[:2] == 'ir':\n return s[2:]\n elif s[-1:] == 'y':\n return s[:-1] + 'i'\n else:\n return s", "def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form", "def stem(self, word):\n word = word.lower()\n\n step1_success = False\n\n # All acute accents are replaced by grave accents.\n word = (word.replace(u(\"\\xE1\"), u(\"\\xE0\"))\n .replace(u(\"\\xE9\"), u(\"\\xE8\"))\n .replace(u(\"\\xED\"), u(\"\\xEC\"))\n .replace(u(\"\\xF3\"), u(\"\\xF2\"))\n .replace(u(\"\\xFA\"), u(\"\\xF9\")))\n\n # Every occurrence of 'u' after 'q'\n # is put into upper case.\n for i in range(1, len(word)):\n if word[i - 1] == \"q\" and word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n\n # Every occurrence of 'u' and 'i'\n # between vowels is put into upper case.\n for i in range(1, len(word) - 1):\n if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:\n if word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n elif word[i] == \"i\":\n word = 
\"\".join((word[:i], \"I\", word[i + 1:]))\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if rv.endswith(suffix):\n if rv[-len(suffix) - 4:-len(suffix)] in (\"ando\", \"endo\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n elif (rv[-len(suffix) - 2:-len(suffix)] in\n (\"ar\", \"er\", \"ir\")):\n word = \"\".join((word[:-len(suffix)], \"e\"))\n r1 = \"\".join((r1[:-len(suffix)], \"e\"))\n r2 = \"\".join((r2[:-len(suffix)], \"e\"))\n rv = \"\".join((rv[:-len(suffix)], \"e\"))\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if word.endswith(suffix):\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2 .endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif (suffix in (\"amento\", \"amenti\",\n \"imento\", \"imenti\") and\n rv.endswith(suffix)):\n step1_success = True\n word = word[:-6]\n rv = rv[:-6]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\"azione\", \"azioni\", \"atore\", \"atori\"):\n word = word[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logie\"):\n word = word[:-2]\n rv = word[:-2]\n\n elif suffix in (\"uzione\", \"uzioni\",\n \"usione\", \"usioni\"):\n word = word[:-5]\n rv = rv[:-5]\n\n elif suffix in (\"enza\", \"enze\"):\n word = \"\".join((word[:-2], \"te\"))\n rv = \"\".join((rv[:-2], \"te\"))\n\n elif suffix == u(\"it\\xE0\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith((\"ic\", \"iv\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"ivo\", \"ivi\", \"iva\", \"ive\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 2: Verb suffixes\n if not step1_success:\n for suffix in self.__step2_suffixes:\n if rv.endswith(suffix):\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 3a\n if rv.endswith((\"a\", \"e\", \"i\", \"o\", u(\"\\xE0\"), u(\"\\xE8\"),\n u(\"\\xEC\"), u(\"\\xF2\"))):\n word = word[:-1]\n rv = rv[:-1]\n\n if rv.endswith(\"i\"):\n word = word[:-1]\n rv = rv[:-1]\n\n # STEP 3b\n if rv.endswith((\"ch\", \"gh\")):\n word = word[:-1]\n\n word = word.replace(\"I\", \"i\").replace(\"U\", \"u\")\n return word", "def define(word):\n\treturn lexicon.get(word.upper(), \"I couldn't find the definition of {}\\n\".format(word))", "async def get_demon(self, ctx, game: str, name: str):\n\n name = await self.nearest_spelling(ctx, name.lower(), self.names[game])\n if name is not None:\n name = \" \".join([i.capitalize() for i in name.split()])\n return name", "def review_word(word):\n\n print \"===== NEXT WORD ======\"\n print \"Is this word \"", "def normalize(word):\n word = word.lower()\n # removing plural, it facilitates the matching\n if len(word)>0 and word[-1] == 's':\n return 
word[0:-1]\n return word", "def detectCapitalUse(self, word: str) -> bool:\n if not word:\n return True\n\n head_upper = word[0].isupper()\n\n # except for the head\n has_lower = False\n has_upper = False\n for w in word[1:]:\n if w.isupper():\n has_upper = True\n if has_lower or not head_upper:\n return False\n else:\n has_lower = True\n if has_upper:\n return False\n return True", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output", "def detectCapitalUse(self, word):\n\n # Check for no upper or all upper\n if all(l.isupper() for l in word) or all(l.islower() for l in word):\n return True\n elif word[0].isupper() and word[1:].islower():\n return True\n else:\n return False", "def findMeaning(data: str, shorten: bool) -> str:\n # # remove all spaces\n # data.replace(' ', '')\n\n # two cases\n # if 》string exists, then meaning is string after this string\n if data.find('》') != -1:\n # find the line after 》\n sym = '》'\n symbol_lines_index = symbol_line_location(data, sym, move=0, addLast=True)\n symbol = find(data, '》')\n\n # now find all places with period\n period = find(data, '。')\n\n # initialize meaning\n meaning = ''\n # add to the meaning\n for i in range(len(symbol)):\n indStart = symbol[i]\n indEnd = max([period[j] for j in range(len(period)) if (period[j] >= symbol_lines_index[i][1]\n and period[j] < symbol_lines_index[i + 1][0]) or\n period[j] < symbol_lines_index[i][1]])\n\n single_meaning = data[int(indStart + 1):indEnd + 1]\n meaning += single_meaning\n\n # if 》string does not exist, then meaning is string after 】\n else:\n # find the line after 】\n sym = '】'\n symbol_lines_index = symbol_line_location(data, sym, move=1, addLast=True)\n\n symbol = find(data, '】')\n\n # now find all places with period\n period = find(data, '。')\n\n # initialize meaning\n meaning = ''\n # add to the meaning\n for i in range(len(symbol)):\n indStart = symbol_lines_index[i][0]\n indEnd = max([period[j] for j in range(len(period)) if (period[j] >= symbol_lines_index[i][1]\n and period[j] < symbol_lines_index[i+1][0]) or period[j] < symbol_lines_index[i][1]])\n\n\n single_meaning = data[int(indStart + 1):indEnd+1]\n meaning += single_meaning\n\n # showMessage(symbol)\n # showMessage(line)\n # showMessage(lines_index)\n # showMessage(symbol_lines_index)\n # showMessage(period)\n\n # remove new lines\n meaning = meaning.replace('\\n', '')\n\n # lastly, get rid of spaces\n meaning = meaning.replace(\" \", \"\")\n\n # if we want to shorten, we will only take the first sentence\n if shorten:\n # find period\n period = find(meaning, '。')\n # take the first period\n meaning = meaning[0:period[0]]\n return meaning", "def test_abbreviation(self):\n self.assertEqual(self.compound.abbreviation, \"Cool\")", "def stem(s):\n short_words = {'is': 'is', 'the': 'the','he': 'he', 'she': 'she', \\\n 'my': 'my', }\n if s in short_words:\n return s\n if s[-1] == 's':\n s = s[:-1]\n special_cases = {'children': 'child', 'doing': 'do', 'did': 'do', \\\n 'string': 'string', 'spring': 'spring'}\n if s in special_cases:\n return special_cases[s]\n if s[-1] == 'e':\n s = s[:-1]\n if s[-3:] == 'ing' and len(s) > 5:\n if s[-5:-3] == 'mm' or s[-5:-3] == 'tt':\n s = s[-4]\n else:\n s = s[:-3]\n if s[-1] == 'y':\n s = s[:-1] + 'i'\n elif s[-2:] == 'er' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n 
elif s[-2:] == 'ed' and len(s) > 4:\n if s[-4:-2] == 'mm' or s[-4:-2] == 'tt':\n s = s[:-3]\n else:\n s = s[:-2]\n return s", "def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]", "def stationabbreviation(station):\n stations = {'Utrecht': 'Ut',\n 'Amsterdam Centraal': 'asd'}\n if station in stations:\n return stations[station]", "def test_find_word(self):\n mic = mi.MicrophoneToText()\n\n teststring = 'x transcript\": ort lautet testort }x'\n\n word = mic.find_word(teststring)\n\n self.assertEqual(word, ' ort lautet testort ')", "def stem(self, word):\n word = word.lower()\n\n if word in self.stopwords:\n return word\n\n step1_success = False\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if not (word.endswith(suffix) and rv.endswith(suffix)):\n continue\n\n if (\n rv[: -len(suffix)].endswith(\n (\n \"ando\",\n \"ar\",\n \"er\",\n \"iendo\",\n \"ir\",\n )\n )\n ) or (\n rv[: -len(suffix)].endswith(\"yendo\")\n and word[: -len(suffix)].endswith(\"uyendo\")\n ):\n\n word = self.__replace_accented(word[: -len(suffix)])\n r1 = self.__replace_accented(r1[: -len(suffix)])\n r2 = self.__replace_accented(r2[: -len(suffix)])\n rv = self.__replace_accented(rv[: -len(suffix)])\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if not word.endswith(suffix):\n continue\n\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\", \"ad\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\n \"adora\",\n \"ador\",\n \"acion\",\n \"adoras\",\n \"adores\",\n \"aciones\",\n \"ante\",\n \"antes\",\n \"ancia\",\n \"ancias\",\n ):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logias\"):\n word = suffix_replace(word, suffix, \"log\")\n rv = suffix_replace(rv, suffix, \"log\")\n\n elif suffix in (\"ucion\", \"uciones\"):\n word = suffix_replace(word, suffix, \"u\")\n rv = suffix_replace(rv, suffix, \"u\")\n\n elif suffix in (\"encia\", \"encias\"):\n word = suffix_replace(word, suffix, \"ente\")\n rv = suffix_replace(rv, suffix, \"ente\")\n\n elif suffix == \"mente\":\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith((\"ante\", \"able\", \"ible\")):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"idad\", \"idades\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n for pre_suff in (\"abil\", \"ic\", \"iv\"):\n if r2.endswith(pre_suff):\n word = word[: -len(pre_suff)]\n rv = rv[: -len(pre_suff)]\n\n elif suffix in (\"ivo\", \"iva\", \"ivos\", \"ivas\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2a: Verb suffixes beginning 'y'\n if not step1_success:\n for suffix in self.__step2a_suffixes:\n if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == 
\"u\":\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2b: Other verb suffixes\n for suffix in self.__step2b_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if suffix in (\"en\", \"es\", \"eis\", \"emos\"):\n if word.endswith(\"gu\"):\n word = word[:-1]\n\n if rv.endswith(\"gu\"):\n rv = rv[:-1]\n break\n\n # STEP 3: Residual suffix\n for suffix in self.__step3_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n if suffix in (\"e\", \"\\xE9\"):\n rv = rv[: -len(suffix)]\n\n if word[-2:] == \"gu\" and rv.endswith(\"u\"):\n word = word[:-1]\n break\n\n word = self.__replace_accented(word)\n\n return word", "def get_lemma(word):\n return stemmer.stem(word)", "def __isVerb__(self, word):\n self.verbs = ('go', 'stop', 'kill', 'eat')\n for verb in self.verbs:\n if verb == word:\n return ('verb', word), True\n return None, False", "def hey(self, sentence=\"\"):\n if sentence == \"\" or sentence.replace(\" \", \"\") == \"\":\n return \"Fine. Be that way!\"\n if sentence.isupper():\n return \"Woah, chill out!\"\n if sentence[-1] == \"?\":\n return \"Sure.\"\n return \"Whatever.\"", "def abbreviation(self):\n\n return self._abbreviation", "def usedWord(afz, word, output=True):\n count = 0\n for msg in msgs:\n if msg.afz == afz:\n if word.lower() in msg.msg.lower():\n count = count + 1\n print afz, 'heeft', count, 'keer het woord', word, 'gebruikt.'", "def normalize_word(self, word, treebank_tag):\n wordnet_pos, part_of_speech = self.get_wordnet_pos(treebank_tag)\n\n if wordnet_pos == wordnet.NOUN and part_of_speech == 'proper':\n return word, 'proper_noun'\n\n lemword = self.wordnetlemmatize.lemmatize(word, wordnet_pos)\n return self.stemmer.stem(lemword), part_of_speech", "def _parse_abbreviation(self, cell_content):\n span = cell_content.find(\"span\")\n full = span.attrs[\"title\"].strip()\n abbrv = span.text.strip()\n return abbrv, full", "def a_or_an(value):\n # TODO: handle confusing things like \"an hour\" or \"a unicycle\"\n vowel_sounds = [\"a\", \"e\", \"i\", \"o\", \"u\"]\n if value[0].lower() in vowel_sounds:\n return \"an\"\n else:\n return \"a\"", "def form_present_verb(word, number, person):\n assert word\n assert isinstance(word, basestring)\n if _is_first_person(person) or _is_second_person(person):\n return word\n elif _is_third_person(person):\n if _is_singular(number):\n return pluralize(word)\n if _is_dual(number) or _is_plural(number): \n return word\n return None", "def deabbreviate(self, st):\n\t\tabbrs = {'gws': 'greater western sydney giants',\n\t\t\t\t 'gwsg': 'greater western sydney giants',\n\t\t\t\t 'afl': 'australian football league',\n\t\t\t\t 'nrc': 'national rugby championship',\n\t\t\t\t 'nrl': 'national rugby league',\n\t\t\t\t 'syd': 'sydney',\n\t\t\t\t 'mel': 'melbourne',\n\t\t\t\t 'melb': 'melbourne',\n\t\t\t\t 'bris': 'brisbane',\n\t\t\t\t 'brisb': 'brisbane',\n\t\t\t\t 'gc': 'gold coast',\n\t\t\t\t 'adel': 'adelaide',\n\t\t\t\t 'canb': 'canberra',\n\t\t\t\t 'mt': 'mount',\n\t\t\t\t 'utd': 'united',\n\t\t\t\t 'cty': 'city',\n\t\t\t\t 'football club': 'fc',\n\t\t\t\t 'snr': 'senior',\n\t\t\t\t 'jr': 'junion',\n\t\t\t\t 'nsw': 'new south wales' ,\n\t\t\t\t 'vic': 'victoria',\n\t\t\t\t 'tas' : 'tasmania',\n\t\t\t\t 'sa': 'south australia',\n\t\t\t\t 'wa': 'western australia',\n\t\t\t\t 'act': 'australian capital territory',\n\t\t\t\t 'nt': 'northern territory',\n\t\t\t\t 'qld': 'queensland',\n\t\t\t\t 'champs': 'championships', \n\t\t\t\t 'champ': 'championship', \n\t\t\t\t 
'soc': 'society',\n\t\t\t\t 'ent': 'entertainment',\n\t\t\t\t 'intl': 'international', \n\t\t\t\t 'int': 'international', \n\t\t\t\t 'aust': 'australian'}\n\n\t\t# first replace full state names by abbreviations;\n\t\tfor ab in abbrs:\n\t\t\tst = re.sub(r'\\b' + ab + r'\\b', abbrs[ab], st)\n\n\t\treturn st", "def replace_word_candidate(self, word):\n capital_flag = word[0].isupper()\n word = word.lower()\n if capital_flag and word in self.teencode_dict:\n return self.replace_teencode(word).capitalize()\n elif word in self.teencode_dict:\n return self.replace_teencode(word)\n\n for couple in self.word_couples:\n for i in range(2):\n if couple[i] == word:\n if i == 0:\n if capital_flag:\n return couple[1].capitalize()\n else:\n return couple[1]\n else:\n if capital_flag:\n return couple[0].capitalize()\n else:\n return couple[0]", "def guess_article(item_name):\n return 'an' if item_name[0].lower() in 'aeiouy' else 'a'", "def just_do_it(text):\n from string import capwords\n return capwords(text)", "def convert_single_word_into_plural_form(word):\n\n if word.endswith('y'):\n return word[:-1] + 'ies'\n elif word[-1] in 'sx' or word[-2:] in ['sh', 'ch']:\n return word + 'es'\n elif word.endswith('an'):\n return word[:-2] + 'en'\n else:\n return word + 's'", "def word_tag(self, word):\n if word[1] in (\"NN\", \"NNS\", \"NNP\", \"NNPS\"):\n return _wordnet.NOUN\n if word[1] in (\"JJ\", \"JJR\", \"JJS\"):\n return _wordnet.ADJ\n if word[1] in (\"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"):\n return _wordnet.VERB\n if word[1] in (\"RB\", \"RBR\", \"RBS\"):\n return _wordnet.ADV\n\n return None", "def get_token(word, flag):\n if flag == 1:\n return \"_RARE_\"\n elif flag == 2:\n if bool(re.search(r'\\d', word)):\n return \"AlphaNum\"\n else:\n return \"oThEr\"\n elif flag == 3:\n if word[-3:] == \"ing\":\n return \"enDiNg\"\n else:\n return \"oThEr\"", "def getType(word):\n rules = {\n 'direction': ['north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back'],\n 'verb': ['go', 'stop', 'kill', 'eat'],\n 'stop': ['the', 'in', 'of', 'from', 'at', 'it'],\n 'noun': ['door', 'bear', 'princess', 'cabinet']\n }\n if isNumber(word):\n return 'number'\n\n for rule, rule_list in rules.items():\n if word in rule_list:\n return rule\n\n return 'error'", "def check_word_capitalization(word):\n return_value = False\n if (len(word) > 1):\n return_value = True if (word[0].isupper() and word[1].islower()) else False\n return return_value", "def isDisambiguatedByNextVerb(self, word):\n\t\treturn 'verb' in disambig_const.DISAMBIGUATATION_TABLE.get(word, {});", "def replace_word(word, is_first, vocab, minimal_fequency):\n ### YOUR CODE HERE\n if word.isdigit():\n if len(word) == 2:\n return 'twoDigitNum'\n elif len(word) == 4:\n return 'fourDigitNum'\n else:\n return 'othernum'\n elif contains_digit(word):\n if contains_alpha(word):\n return 'containsDigitAndAlpha'\n elif '-' in word:\n return 'containsDigitAndDash'\n elif '/' in word or '\\\\' in word:\n return 'containsDigitAndSlash'\n elif '.' 
in word:\n return 'containsDigitAndPeriod'\n if word.isalpha() and word.isupper():\n return 'allCaps'\n elif CAP_PERIOD_PATTERN.match(word):\n return 'capPeriod'\n if is_first and vocab.get(word.lower(), 0) >= minimal_fequency:\n return word.lower()\n if not is_first and word[0].isupper():\n return 'initCap'\n if word.isalpha():\n for suffix in SUFFIXES:\n if word.endswith(suffix):\n return 'wordSuffix' + suffix\n if word.isalpha():\n for prefix in PREFIXES:\n if word.startswith(prefix):\n return prefix + 'WordPrefix'\n if '-' in word:\n return 'withDash'\n elif word.isalpha() and word.lower() == word:\n return 'lowercase'\n ### END YOUR CODE\n return UNKNOWN_WORD", "def isUnique(self, word):\n if len(word) < 3:\n abbrev = word\n else:\n abbrev = word[0] + str(len(word) - 2) + word[-1]\n if not abbrev in self.abbrev_dict:\n return True\n elif word in self.abbrev_dict[abbrev] and len(self.abbrev_dict[abbrev]) == 1:\n return True\n else:\n return False", "def guess_word(self, prefix):\n prefix = prefix.lower()\n current = self.search_prefix(prefix)\n if current:\n print(\"You typed: \" + '\"' + prefix + '\"')\n if current.get_end():\n print('\"' + prefix + '\" is a word, but you could have also been typing out:')\n else:\n print('\"' + prefix + '\" is not a word, perhaps you were typing out:')\n library = [prefix + word for word in self.get_library(current, library = [])]\n for word in library:\n print(word)\n return library # list includes empty string if its already a word\n print(\"I'm not quite sure what you meant by \" + '\"' + prefix + '\"...')\n return []", "def _step1b(self, word):\n # this NLTK-only block extends the original algorithm, so that\n # 'spied'->'spi' but 'died'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ied\"):\n if len(word) == 4:\n return self._replace_suffix(word, \"ied\", \"ie\")\n else:\n return self._replace_suffix(word, \"ied\", \"i\")\n\n # (m>0) EED -> EE\n if word.endswith(\"eed\"):\n stem = self._replace_suffix(word, \"eed\", \"\")\n if self._measure(stem) > 0:\n return stem + \"ee\"\n else:\n return word\n\n rule_2_or_3_succeeded = False\n\n for suffix in [\"ed\", \"ing\"]:\n if word.endswith(suffix):\n intermediate_stem = self._replace_suffix(word, suffix, \"\")\n if self._contains_vowel(intermediate_stem):\n rule_2_or_3_succeeded = True\n break\n\n if not rule_2_or_3_succeeded:\n return word\n\n return self._apply_rule_list(\n intermediate_stem,\n [\n (\"at\", \"ate\", None), # AT -> ATE\n (\"bl\", \"ble\", None), # BL -> BLE\n (\"iz\", \"ize\", None), # IZ -> IZE\n # (*d and not (*L or *S or *Z))\n # -> single letter\n (\n \"*d\",\n intermediate_stem[-1],\n lambda stem: intermediate_stem[-1] not in (\"l\", \"s\", \"z\"),\n ),\n # (m=1 and *o) -> E\n (\n \"\",\n \"e\",\n lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),\n ),\n ],\n )", "def keyword_from_meaning(name):\n # Try to adhere to keyword scheme in DICOM (CP850)\n\n # singular/plural alternative forms are made plural\n # e.g., “Physician(s) of Record” becomes “PhysiciansOfRecord”\n name = name.replace(\"(s)\", \"s\")\n\n # “Patient’s Name” -> “PatientName”\n # “Operators’ Name” -> “OperatorsName”\n name = name.replace(\"’s \", \" \")\n name = name.replace(\"'s \", \" \")\n name = name.replace(\"s’ \", \"s \")\n name = name.replace(\"s' \", \"s \")\n\n # Mathematical symbols\n name = name.replace(\"%\", \" Percent \")\n name = name.replace(\">\", \" Greater Than \")\n name = name.replace(\"=\", \" Equals \")\n name = name.replace(\"<\", \" Lesser Than 
\")\n\n name = re.sub(r\"([0-9]+)\\.([0-9]+)\", \"\\\\1 Point \\\\2\", name)\n name = re.sub(r\"\\s([0-9.]+)-([0-9.]+)\\s\", \" \\\\1 To \\\\2 \", name)\n\n name = re.sub(r\"([0-9]+)day\", \"\\\\1 Day\", name)\n name = re.sub(r\"([0-9]+)y\", \"\\\\1 Years\", name)\n\n # Remove category modifiers, such as \"(specimen)\", \"(procedure)\",\n # \"(body structure)\", etc.\n name = re.sub(r\"^(.+) \\([a-z ]+\\)$\", \"\\\\1\", name)\n\n name = camel_case(name.strip())\n\n # Python variables must not begin with a number.\n if re.match(r\"[0-9]\", name):\n name = \"_\" + name\n\n return name", "def getWord(self,):\n\t\treturn self.word;", "def guessed_word(self): # helper function to display_correct_guess()\n if self.correct_guess():\n if self.display_word.replace(' ', '') == self.chosen_word:\n self.display_word = self.chosen_word # process will continue until it reaches ***\n return True, self.display_word\n else:\n return False", "def __check_word__(self, word):\n self.directionValue, self.isDirection = self.__isDirection__(word.lower())\n self.verbValue, self.isVerb = self.__isVerb__(word.lower())\n self.stopValue, self.isStop = self.__isStopWord__(word.lower())\n self.nounValue, self.isNoun = self.__isNoun__(word.lower())\n self.numberValue, self.isNumber = self.__isNumber__(word.lower())\n\n if self.isDirection:\n return self.directionValue\n elif self.isVerb:\n return self.verbValue\n elif self.isStop:\n return self.stopValue\n elif self.isNoun:\n return self.nounValue\n elif self.isNumber:\n return self.numberValue\n else:\n return ('error', word)", "def is_cap_word(self, word):\n try:\n return word[0].isupper()\n except:\n return False", "def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass", "def normalize_word (word):\n return st.stem(word.strip ().lower ())", "def test_word_info(self):\n word = \"vitality\"\n rv = self.wordInfo(input_word=word)\n expected_output = {\n word: {\n \"frequency\": \"975\",\n \"defination\": \"{'Noun': ['an energetic style', 'a healthy capacity for vigorous activity', '(biology', 'not physical or chemical', 'the property of being able to survive and grow']}\",\n \"antonyms\": \"['enervation', 'inactivity', 'lethargy', 'weakness', 'lack']\",\n \"examples\": \"{1: 'And finally, both Lord Robertson and Secretary of State Powell pointed to what they called the vitality and the relevance of NATO, and said any damage done to the reputation of NATO over the last couple weeks can quite, in their words, be easily overcome.', 2: \\\"Professor Huxley himself has told us that he lived in 'the hope and the faith that in course of time we shall see our way from the constituents of the protoplasm to its properties,' _i. 
e._ from carbonic acid, water, and ammonia to that mysterious thing which we call vitality or life -- from the molecular motion of the brain to Socratic wisdom,\\\", 3: 'The strongest, the most amply endowed with what we call vitality or power to live, win.', 4: 'But the thought that it is mechanics and chemistry applied by something of which they as such, form no part, some agent or principle which we call vitality, is welcome to us.', 5: '\\\"The Indian savages,\\\" said Margrave, sullenly, \\\"have not a health as perfect as mine, and in what you call vitality -- the blissful consciousness of life -- they are as sticks and stones compared to me.\\\"'}\",\n \"pronounciation\": \"V AY0 T AE1 L AH0 T IY0\",\n \"synonyms\": \"['vigor', 'continuity', 'spunk', 'strength', 'verve']\"\n }\n }\n response_data = json.loads(rv.get_data(as_text=True))\n\n self.assertEquals(rv.status_code, 200)\n self.assertEquals(response_data[word][\"defination\"], expected_output[word][\"defination\"])\n self.assertEquals(response_data[word][\"antonyms\"], expected_output[word][\"antonyms\"])\n self.assertEquals(response_data[word][\"examples\"], expected_output[word][\"examples\"])\n self.assertEquals(response_data[word][\"frequency\"], expected_output[word][\"frequency\"])\n self.assertEquals(response_data[word][\"pronounciation\"], expected_output[word][\"pronounciation\"])\n self.assertEquals(response_data[word][\"synonyms\"], expected_output[word][\"synonyms\"])", "def morph_noun(word, number, a_an, feature):\n word = morph_number(word, number)\n if not (number in ['first', 'second']) and word[-1] == 's':\n return mark_noun_as_plural(word)\n else:\n return word\n\n if a_an == 'an':\n return mark_noun_as_an(word)\n else:\n return word", "def med_in_hindi(word):\r\n\treturn int(med(correction(word),word))", "def get_word(w):\n return ''.join(c for c in w if c.isalpha()).lower()", "def abbreviate(x: str) -> str:\n i = 0\n abbreviation: str = \"\"\n while i < len(x):\n if x[i].isupper():\n abbreviation += x[i]\n i += 1\n return abbreviation", "def map_word(self, word):\n for invariance in self.invariances:\n word = invariance.map_word(word)\n return word" ]
[ "0.6904979", "0.68378323", "0.6644737", "0.6541667", "0.6537515", "0.6440209", "0.6420517", "0.6390089", "0.63880235", "0.6369538", "0.63481516", "0.63338417", "0.6322253", "0.6277778", "0.62644935", "0.6260187", "0.62546563", "0.62457216", "0.6238056", "0.6238056", "0.6198607", "0.61820537", "0.61625916", "0.6147041", "0.6146263", "0.6145105", "0.61258423", "0.6106864", "0.6077692", "0.6065827", "0.60480535", "0.60479647", "0.60433114", "0.60301346", "0.60254735", "0.60036254", "0.6000636", "0.59983194", "0.59896314", "0.5987526", "0.59861356", "0.59831095", "0.5970029", "0.5967068", "0.5963", "0.5943353", "0.5908138", "0.5885942", "0.58848673", "0.5877995", "0.58575135", "0.585694", "0.5856682", "0.58510935", "0.5843788", "0.58410114", "0.584004", "0.5832579", "0.5831531", "0.5831312", "0.5831232", "0.5825457", "0.58101565", "0.580916", "0.5799324", "0.5791568", "0.57841176", "0.5762731", "0.5753913", "0.5748402", "0.57481796", "0.5739254", "0.5735073", "0.57318044", "0.57286024", "0.57279074", "0.57087654", "0.5702749", "0.5694456", "0.5693287", "0.5692739", "0.5689398", "0.5687838", "0.5685031", "0.5675526", "0.5672941", "0.56652874", "0.5664375", "0.56580323", "0.56563973", "0.5655768", "0.5653358", "0.5652246", "0.5651475", "0.565108", "0.564712", "0.56465846", "0.5639597", "0.56356245", "0.5631102" ]
0.66452616
2
Create an instance of all possible analyses. If you're not able to create it... you're not able to use it.
def build_all_analysis(self, matrix_handler, trajectory_handler): distance_matrix = matrix_handler.distance_matrix self.all_possible_analysis = {} # Pure queries self.all_possible_analysis["Details"] = Analysis("Details", self.analysis_function_details) self.all_possible_analysis["NumClusters"] = Analysis("Number of clusters", self.analysis_function_num_clusters) self.all_possible_analysis["NumClusteredElems"] = Analysis("Number of clustered elements", self.analysis_function_total_elements) self.all_possible_analysis["MeanClusterSize"] = Analysis("Mean cluster size", self.analysis_function_mean_cluster_size) self.all_possible_analysis["PercentInTop4"] = Analysis("Percent in top 4 clusters", self.analysis_function_top_4) self.all_possible_analysis["PercentInTop"] = Analysis("Percent in top cluster", self.analysis_function_top_percent) self.all_possible_analysis["ClustersTo90"] = Analysis("Clusters to 90", self.analysis_function_num_clusters_to_percent, 90) self.all_possible_analysis["NoiseLevel"] = Analysis("Noise level", self.analysis_function_noise_level, distance_matrix.row_length) # Evaluators self.all_possible_analysis["MirrorCohesion"] = Analysis("MirrorCohesion", self.evaluate_with_calculator, {"class":MirrorCohesionCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Cohesion"] = Analysis("Cohesion", self.evaluate_with_calculator, {"class":CohesionCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Separation"] = Analysis("Separation", self.evaluate_with_calculator, {"class":SeparationCalculator,"matrix":distance_matrix}) self.all_possible_analysis["MinimumMeanSeparation"] = Analysis("MinimumMeanSeparation", self.evaluate_with_calculator, {"class":MeanMinimumDistanceCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Silhouette"] = Analysis("Silhouette", self.evaluate_with_calculator, {"class":SilhouetteCoefficientCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Calinski-Harabasz"] = Analysis("Calinski-Harabasz", self.evaluate_with_calculator, {"class":CalinskiHarabaszCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Dunn"] = Analysis("Dunn", self.evaluate_with_calculator, {"class":DunnCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Davies-Bouldin"] = Analysis("Davies-Bouldin", self.evaluate_with_calculator, {"class":DaviesBouldinCalculator,"matrix":distance_matrix}) self.all_possible_analysis["GaussianSeparation"] = Analysis("GaussianSeparation", self.evaluate_with_calculator, {"class":GaussianSeparationCalculator,"matrix":distance_matrix}) self.all_possible_analysis["Compactness"] = Analysis("Compactness", self.evaluate_with_calculator, {"class":CompactnessCalculator,"matrix":distance_matrix}) # Cython self.all_possible_analysis["CythonMirrorCohesion"] = Analysis("CythonMirrorCohesion", self.evaluate_with_calculator, {"class":CythonMirrorCohesionCalculator,"matrix":distance_matrix}) self.all_possible_analysis["CythonMinimumMeanSeparation"] = Analysis("CythonMinimumMeanSeparation", self.evaluate_with_calculator, {"class":CythonMeanMinimumDistanceCalculator,"matrix":distance_matrix}) self.all_possible_analysis["CythonSilhouette"] = Analysis("CythonSilhouette", self.evaluate_with_calculator, {"class":CythonSilhouetteCoefficientCalculator,"matrix":distance_matrix}) # Graph self.all_possible_analysis["RatioCut"] = Analysis("RatioCut", self.evaluate_with_calculator, {"class":RatioCut,"matrix":distance_matrix}) self.all_possible_analysis["NCut"] = Analysis("NCut", self.evaluate_with_calculator, 
{"class":NCut,"matrix":distance_matrix}) self.all_possible_analysis["NormNCut"] = Analysis("NormNCut", self.analysis_function_norm_n_cut,distance_matrix) self.all_possible_analysis["MinMaxCut"] = Analysis("MinMaxCut", self.evaluate_with_calculator, {"class":MinMaxCut,"matrix":distance_matrix}) # Cython & Graph self.all_possible_analysis["CythonNormNCut"] = Analysis("CythonNormNCut", self.analysis_function_cython_norm_n_cut,distance_matrix) # PCA self.all_possible_analysis["PCAanalysis"] = Analysis("PCAanalysis", self.analysis_function_pca, trajectory_handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_analysis_tools(self):\r\n raise NotImplementedError()", "def analysis_setup(self):\n pass", "def init():\n # analyzer es utilizado para interactuar con el modelo\n analyzer = model.newAnalyzer()\n return analyzer", "def analysis(self, checkid):\r\n return analysis.Analysis(self, checkid)", "def __init__(self, samples, analysis):\r\n self.samples = samples\r\n self.analysis = analysis", "def newAnalyzer():\n analyzer = {'crimes': None,\n 'dateIndex': None,\n 'autors': None,\n 'instrumentalness': None,\n 'tempo':None,\n 'liveness':None,\n 'speechiness':None,\n 'danceability':None,\n 'valence':None,\n 'loudness':None,\n 'acousticness':None,\n 'energy':None,\n 'generos':None\n }\n\n analyzer['crimes'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['ids'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n\n analyzer['autors'] = om.newMap(omaptype='RBT',\n comparefunction=compareAUTOR)\n\n analyzer['instrumentalness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['tempo'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['liveness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['danceability'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n analyzer['valence'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['loudness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['energy'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n\n analyzer['generos']= m.newMap(11,\n maptype='CHAINING',\n loadfactor=4.0)\n \n return analyzer", "def __init__(self, autosub=False):\n self.G = nx.DiGraph()\n self.autosub = autosub\n \"\"\"\n Graph object of this analyzer.\n It is actually a networkx directed graph object(DiGraph), so you can apply all operations available to DiGraph object using networkx.\n \"\"\"\n self.entityList = [dict() for x in range(len(NEList))]\n \"\"\"\n List of entities appeared during this analysis round.\n \"\"\"\n self.proList = list()\n \"\"\"\n List of pronouns appeared during this analysis round.\n \"\"\"\n self.pos = 0\n \"\"\"\n Current position of the analyzer.\n \"\"\"\n self.proc = Subprocess('cabocha -f1')\n \"\"\"\n Communicator to backend for KnowledgeAnalyzer.\n \"\"\"", "def newAnalyzer():\n analyzer = {'tracks': None,\n 'songs': None,\n 'artists': None,\n 'char': None,\n 'char2': None,\n 'char3': None,\n }\n\n analyzer['tracks'] = lt.newList('SINGLE_LINKED', compareIds)\n analyzer['songs'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['artists'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char2'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char3'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char4'] = mp.newMap(numelements=50,\n maptype='PROBING',\n comparefunction=compareValue)\n \n return analyzer", "def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)", "def __init__(self, input=None):\r\n BaseAnalyzer.__init__(self, input)", "def __init__(self, analyzers: Iterable[Type[Analyzer]], model_repository: ModelRepository,\n data_service: DataService):\n self._model_repository = 
model_repository\n analyzers = [(a.__name__, a) for a in analyzers]\n analyzers.sort()\n self._analyzers = [a[1] for a in analyzers]\n self._data_service = data_service", "def analyse(self):\n pass", "def initAnalyzer():\n return controller.initAnalyzer()", "def analysis(self):\r\n return analysis.Analysis(self.parent, self.object_id)", "def __init__(self):\n super(TestAnalyzer, self).__init__()\n self._results = []", "def __init__(self, out_dir, analysis_out_dir, evaluation_run_name,\n evaluation_name):\n if out_dir is None:\n out_dir = os.getcwd()\n self.out_dir = out_dir\n\n # Copy the analysis results to the report output directory, so that the HTML\n # report can be correctly rendered even if we move the csv files, plots,\n # etc.\n if out_dir != analysis_out_dir:\n analysis_file_dirs = evaluator.load_directory_tree(\n out_dir=analysis_out_dir,\n run_name=evaluation_run_name,\n evaluation_name=evaluation_name)\n shutil.copytree(\n analysis_file_dirs[evaluator.KEY_RUN_DIR],\n os.path.join(out_dir, evaluation_run_name))\n\n self.analysis_results = analyzer.get_analysis_results(\n out_dir, evaluation_run_name, evaluation_name)\n\n self.analysis_results[KEY_NUM_ESTIMABLE_SETS_STATS_DF] = (\n ReportGenerator.add_parsed_sketch_estimator_name_cols(\n self.analysis_results[KEY_NUM_ESTIMABLE_SETS_STATS_DF],\n analyzer.SKETCH_ESTIMATOR_NAME))\n\n self.analysis_type = None", "def analyze(self):\n dataset = self.config.dataset\n class_config = dataset.class_config\n\n scene_id_to_cfg = {s.id: s for s in dataset.all_scenes}\n\n @lru_cache(maxsize=len(dataset.all_scenes))\n def build_scene(scene_id: str) -> Scene:\n cfg = scene_id_to_cfg[scene_id]\n scene = cfg.build(\n class_config, self.tmp_dir, use_transformers=False)\n return scene\n\n # build and run each AnalyzerConfig for each scene group\n for a in self.config.analyzers:\n for group_name, group_ids in dataset.scene_groups.items():\n if len(group_ids) == 0:\n log.info(f'Skipping scene group \"{group_name}\". '\n 'Empty scene group.')\n continue\n group_scenes = (build_scene(id) for id in group_ids)\n analyzer = a.build(scene_group=(group_name, group_scenes))\n\n log.info(f'Running {type(analyzer).__name__} on '\n f'scene group \"{group_name}\"...')\n analyzer.process(group_scenes, self.tmp_dir)", "def _build_analyzer():\n analyzer = {}\n for e in LOSSLESS:\n analyzer[e] = lambda d, _ext, name: d.lossless.append(name)\n for e in COMPRESSED:\n analyzer[e] = lambda d, _ext, name: d.compressed.append(name)\n for e in IMAGES:\n analyzer[e] = lambda d, _ext, name: d.images.append(name)\n for e in VIDEOS:\n analyzer[e] = lambda d, _ext, name: d.videos.append(name)\n\n def _increment_ignored(d, _ext, _name):\n d.ignored += 1 # Can't use assignment in lambda\n\n for e in IGNORE:\n analyzer[e] = _increment_ignored\n analyzer['cue'] = lambda d, _, name: d.cue.append(name)\n\n return analyzer", "def createAnalysisSampleVisitor(config, cuts):\n\n # TODO: warn user if this function is called but 'cutbased' is false? 
(or no jobs yet booked - must do this first if a cutbased analysis is desired)\n\n # read the channel definitions\n channels = config.getTagVString(\"channels\")\n\n CLI = config.getFolder(\"CLI+\")\n # flag indicating to run a robust analysis\n robust = CLI.getTagBoolDefault(\"robust\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n if not config.getTagBoolDefault(\"useMultiChannelVisitor\",False) or robust or dummy:\n # using regular analysis sample visitor (default)\n visitor = QFramework.TQAnalysisSampleVisitor()\n visitor.setVerbose(True)\n visitor.setBaseCut(cuts)\n visitor.setPrettyPrint(config.getTagBoolDefault(\"prettyPrint\",True))\n visitor.setLineUpdates(config.getTagBoolDefault(\"lineUpdates\",True))\n visitor.setTagDouble(\"progressInterval\",config.getTagDoubleDefault(\"progressInterval\",5.))\n else:\n # using fast MultiChannel analysis sample visitor\n visitor = QFramework.TQMultiChannelAnalysisSampleVisitor()\n visitor.setVerbose(True)\n visitor.setPrettyPrint(config.getTagBoolDefault(\"prettyPrint\",True))\n visitor.setLineUpdates(config.getTagBoolDefault(\"lineUpdates\",True))\n visitor.setTagDouble(\"progressInterval\",config.getTagDoubleDefault(\"progressInterval\",5.))\n\n runtime = config.getFolder(\"runtime+\")\n # TODO: add some protection against not finding the mcasvchannels in the runtime config for whatever reason\n mcasvchannels = runtime.getTagVStandardString(\"mcasvchannels\")\n\n # TODO: cutlist was defined in runAnalysis.py, but aparently not used\n #cutlist = []\n for channel in mcasvchannels:\n cut = cuts.getClone()\n #cutlist.append(cut)\n visitor.addChannel(channel,cut)\n # TODO: used previously just as a list for cloneObservablesSmart\n # mcvchannels.append(channel)\n if config.getTagBoolDefault(\"showChannels\",False):\n visitor.printChannels()\n\n # TODO: SmartObservableCloning not yet migrated to CAFExample (or fully implemented?) - Initial author = Carsten\n # Safe for this to go here? i.e. only for MCASV and before analysis algorithms are attached\n cloneObservablesSmart = False\n if config.getTagBoolDefault(\"reduceMCVObservables\",False):\n try:\n from CAFExample.SmartObservableCloning import cloneSetSmart\n cloneObservablesSmart = True\n except ImportError:\n cloneObservablesSmart = False\n QFramework.ERROR(\"smart observable cloning unavailable, skipping\")\n if cloneObservablesSmart:\n for channel in mcasvchannels:\n QFramework.TQObservable.getManager().cloneActiveSet(channel)\n\n return visitor", "def __init__(self):\n Sampler.__init__(self)\n self._registeredIdentifiers = set() # tracks job identifiers used for this adaptive sampler and its inheritors\n self._prefixToIdentifiers = {} # tracks the mapping of run prefixes to particular identifiers\n self._inputIdentifiers = {} # identifiers for a single realization\n self._targetEvaluation = None # data object with feedback from sample realizations\n self._solutionExport = None # data object for solution printing\n self._requireSolnExport = False # if this object requires a solution export\n # NOTE TargetEvaluations consider all the Step <Output> DataObjects as candidates, so requiring\n # exactly one TargetEvaluation forces only having one <Output> DataObject in AdaptiveSampling\n # MultiRun Steps. 
For now, we leave it as \"n\".\n self.addAssemblerObject('TargetEvaluation', InputData.Quantity.one_to_infinity) # Place where realization evaluations go", "def newAnalyzer():\n analyzer = {'accidentes': None,\n 'dateIndex': None\n }\n\n analyzer['accidentes'] = lt.newList('SINGLE_LINKED', compareseverity)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n return analyzer", "def newAnalyzer():\n analyzer = {'accidents': None,\n 'dateIndex': None,\n 'timeIndex': None\n }\n\n analyzer['accidents'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['timeIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareTimes)\n \n return analyzer", "def __init__(self):\n IContainsAnimals.__init__(self, 15)\n IContainsPlants.__init__(self, 3)\n Identifiable.__init__(self)\n Biome.__init__(self, \"Coastline\")", "def __init__(self):\n self.analysis=''\n self.terms=''\n self.path=''", "def __init__(self):\n self._tool_data = {}\n self.feedback = []\n self.ignored_feedback = []\n self.suppressions = {}\n self.suppressed_labels = {}\n self.hiddens = set()\n self.groups = []\n self.group = None\n self.group_names = {}\n self.hooks = {}\n self.class_hooks = {}\n self.submission = None\n self.format = Formatter()\n self.result = None\n self.resolves = []\n self.overridden_feedbacks = set()\n log.debug(\"New Pedal Report created.\")", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def __init__(self, store_directory):\n # Read in data\n analysis_script_path = os.path.join(store_directory, 'analysis.yaml')\n if not os.path.isfile(analysis_script_path):\n err_msg = 'Cannot find analysis.yaml script in {}'.format(store_directory)\n raise RuntimeError(err_msg)\n with open(analysis_script_path, 'r') as f:\n analysis = yaml.load(f)\n phases = []\n signs = {}\n ncfiles = {}\n for phase, sign in analysis:\n phases.append(phase)\n signs[phase] = sign\n ncfile_path = os.path.join(store_directory, phase + '.nc')\n ncfiles[phase] = nc.Dataset(ncfile_path, 'r')\n self.phases = phases\n self.signs = signs\n self.ncfiles = ncfiles\n self.nphases = len(phases)\n # Assign flags for other sections along with their global variables\n # General Data\n self._general_run = False\n self.iterations = {}\n # Equilibration\n self._equilibration_run = False\n self.u_ns = {}\n self.nequils = {}\n self.g_ts = {}\n self.Neff_maxs = {}\n # Decorrelation break-down\n self._decorrelation_run = False\n # Mixing Run (state)\n self._mixing_run = False\n # Replica mixing\n self._replica_mixing_run = False\n self._free_energy_run = False", "def 
__init__(self):\n self.libpath = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1])\n sys.path.append(self.libpath)\n libpath2 = os.sep.join(self.libpath.split(os.sep)[:-1])\n sys.path.append(libpath2)\n # Initialize TCMetaSchema with correct libpath\n TCMetaSchema(self.libpath)\n self.args, self.unknown = IceteaManager._parse_arguments()\n # If called with --clean, clean up logs.\n if self.args.clean:\n _cleanlogs(silent=self.args.silent, log_location=self.args.log)\n\n LogManager.init_base_logging(self.args.log, verbose=self.args.verbose,\n silent=self.args.silent, color=self.args.color,\n no_file=(self.args.list or self.args.listsuites),\n truncate=not self.args.disable_log_truncate)\n\n self.logger = LogManager.get_logger(\"icetea\")\n self.pluginmanager = None\n self.resourceprovider = ResourceProvider(self.args)\n self._init_pluginmanager()\n self.resourceprovider.set_pluginmanager(self.pluginmanager)", "def _generate_trading_instances(self):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler", "def __init__(self, test):\n self.all_grams = Ngram(self.START_OF_SENTENCE_TOKEN)\n for label in self.label_type_map:\n self.words_labels_counts[label] = {}\n self.words_labels_counts[label][self.UNKNOWN_TOKEN] = 0\n if test:\n self.train(\"train.txt\")\n self.test(\"test.txt\")\n else:\n self.train(\"train_partial.txt\")\n self.validate(\"validation_partial.txt\")", "def __init__(self, **kwargs):\n\n # call base class constructor registering that this tool performs everything.\n Algorithm.__init__(\n self,\n performs_projection = True,\n use_projected_features_for_enrollment = True,\n requires_enroller_training = True\n )", "def __init__(self):\r\n self.filter_p_number = 3 # First one with enough data for statistics\r\n self.prfs_d = extract_settings_elvis()\r\n\r\n ccds = True\r\n filtered = False\r\n scamp = False\r\n\r\n input_df = read_csv('cats/cat_clean_ssos.csv', index_col=0)\r\n filt_cat = self.gets_filtered_catalog() # Gets data from filtered\r\n\r\n if ccds:\r\n cats_d = self.extract_cats()\r\n self.extract_stats_ccds(cats_d, input_df, filt_cat)\r\n elif filtered:\r\n self.extract_stats_filt(filt_cat, input_df)\r\n elif scamp:\r\n pass\r\n # self.extract_stats_scamp(input_df)\r\n else:\r\n pass", "def __init__(self, analysis: Analysis) -> None:\n self.analysis = analysis\n self.output_basename = os.path.join(\"REPORTS\", self.analysis.basename)\n self.only_human = True\n\n self.max_records_str = utils.get_picard_max_records_string(\n self.analysis.parameters[\"picard_max_records\"]\n )\n self.sort_tempdir = os.path.join(\n self.analysis.get_bam_dir(), \"%s_sort_tmp\" % self.analysis.sample\n )", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def __init__(self):\n self.data = None\n self.decisionTree = {}\n self.enClass = 0\n self.nlClass = 0\n self.listAttributes = [\"Contains-het\", \"Contains-de\", \"Contains-een\", 
\"Contains-en/aan\", \"Contains-ij\", \"wordLength14\",\n \"Contains-a/an\", \"Contains-are/were\", \"Contains-and\", \"Contains-on/to\", \"Contains-the\"]\n self.infoGain = []\n self.entropy = 0", "def __init__(self):\n self._predefined_cluster_topics()\n self._gatherSEs()", "def analysis(self, game_info):\n pass", "def __init__(self, x=None, y=None, analysis=None, error=None):\n logger.info(\"Creating Diva 2D Result object\")\n if isinstance(x, np.ndarray):\n self.x = x\n elif isinstance(x, list):\n self.x = np.array(x)\n else:\n logger.debug(\"X vector not defined. Should be np.ndarray or list\")\n self.x = None\n\n if isinstance(y, np.ndarray):\n self.y = y\n elif isinstance(x, list):\n self.y = np.array(y)\n else:\n logger.debug(\"Y vector not defined. Should be np.ndarray or list\")\n self.y = None\n\n if isinstance(analysis, np.ndarray):\n if analysis.shape[0] == len(x) and analysis.shape[1] == len(y):\n logger.debug('Consistent dimensions for the analysed field')\n self.analysis = analysis\n if isinstance(error, np.ndarray):\n if analysis.shape == error.shape:\n logger.debug('Consistent dimensions for the error field')\n self.error = error\n else:\n logger.error(\"Dimension mismatch\")\n raise Exception(\"Dimension mismatch\")\n else:\n logger.debug(\"Error field not defined\")\n self.error = None\n else:\n logger.error(\"Dimension mismatch\")\n raise Exception(\"Dimension mismatch\")\n else:\n logger.debug(\"Analysed field not defined\")\n self.analysis = None\n self.error = None", "def __init__(self):\n for item in grammar:\n item['matches_compiled'] = {}\n for name,pattern in item['matches'].items():\n item['matches_compiled'][name] = \\\n re.compile(pattern, re.IGNORECASE)\n\n item['semantics_compiled'] = {}\n for name,pattern in item['semantics'].items():\n item['semantics_compiled'][name] = \\\n re.compile(pattern)\n\n if constants.SPELLCHECK:\n self.didyoumean = DidYouMean('en-us', constants.DICT_DIR)", "def __init__(\n self, api,\n ):\n self._api = api\n self._api_response = self._api.find_cases(range=\"all\", sort=[])\n self._all30_dict = {}\n self._all60_dict = {}\n self._all90_dict = {}\n\n self._data_frame_30days = None\n self._data_frame_60days = None\n self._data_frame_90days = None\n self._data_frame_counts = None\n self._dataset = None", "def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")", "def initiateAnalysis(self,):\n\n #\n # Imports\n #\n import os\n import sys\n\n #\n # get optional arguments from commandline\n #\n self.getComandLineOptions()\n \n #\n # for logmessages\n #\n tmpLogMessages = ['----------------\\n']\n tmpLogMessage = self.createLogHeader()\n tmpLogMessages.append(tmpLogMessage)\n #print tmpLogMessage\n \n #\n # check analysis path\n #\n if os.path.isdir(self.analysisPath):\n tmpLogMessage = 'WARNING: the analysis path already exists.\\n'\n print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n else:\n tmpLogMessage = 'Creating directory \"'+self.analysisPath+'\".\\n'\n #print tmpLogMessage\n tmpLogMessages.append(tmpLogMessage)\n os.makedirs(self.analysisPath)\n \n #\n # create the logfile\n #\n tmpLogMessages += self.openLogfileConnection()\n \n #\n # write tmpLogMessages to logfile\n #\n 
SEAseqPipeLine.logfile.write(''.join(tmpLogMessages))\n \n #\n # create the database\n #\n self.database.create()\n \n #\n # add run to runs table\n #\n self.database.addToRunsTable(self.startTimeStr, self.command, self.commandLine, True, MASTER)\n \n return 0", "def __init__(self, *args):\n # The first Analysis object with all dependencies.\n # Any item from the first executable cannot be removed.\n self._main = None\n\n self._dependencies = {}\n\n self._id_to_path = {}\n for _, i, p in args:\n self._id_to_path[os.path.normcase(i)] = p\n\n # Get the longest common path\n common_prefix = os.path.commonprefix([os.path.normcase(os.path.abspath(a.scripts[-1][1])) for a, _, _ in args])\n self._common_prefix = os.path.dirname(common_prefix)\n if self._common_prefix[-1] != os.sep:\n self._common_prefix += os.sep\n logger.info(\"Common prefix: %s\", self._common_prefix)\n\n self._merge_dependencies(args)", "def __init__(self, langName):\n self.langName = langName\n self.readDataSets(langName)\n # self.getVMWEReport()\n self.analyzeSents()\n self.orderParentVMWEs()\n self.getTrainAndTest()\n self.cleanSents()\n self.extractDictionaries()\n self.deleteNonRecognizableMWE()\n printStats(self.trainingSents, 'Train', mweDic=self.mweDictionary, langName=langName, test=False)\n printStats(self.testingSents, 'Test', mweDic=self.mweDictionary, test=True)", "def __init__(self):\n\n self.name = None\n self.summary = None\n self.cases = []", "def __init__(self,classes=['normalizeText','tagger','stem','stopWord','spellChecker']):\n self._support = prebotSupport()\n self._classes = classes\n if(\"tagger\" in self._classes):\n self._tagger = tagger()\n if(\"normalizeText\" in self._classes):\n self._normalize = normalizeText()\n if(\"spellChecker\" in self._classes):\n self._spellChecker = spellChecker()\n if(\"stopWord\" in self._classes):\n self._stopWord = stopWord()\n if(\"stem\" in self._classes):\n self._stem = stemming()", "def do_analysis(ckpt, queries_type, entities_type, request):\n global currently_analyzing, results, d, analysis_user\n try:\n print(\"starting analysis!\")\n if entities_type == \"all\":\n print(\"using all entities detected!\")\n elif entities_type == \"uploaded\":\n print(\"using only entities specified in csv file!\")\n \n currently_analyzing = True\n analysis_user = request.user.username\n results = []\n proj_path = os.path.abspath(os.path.dirname(__file__)).split(\"FYP_Web_App\")[0]\n ckpt = proj_path + \"FewRel/checkpoint/\" + ckpt\n if d is None or d.ckpt_path != ckpt:\n d = DetectionFramework(ckpt_path=ckpt)\n if cancel_flag[0]:\n return\n d.clear_support_queries()\n if len([i for i in os.listdir(\"temp/relation_support_datasets\") if 'csv' in i and request.user.username in i]) == 0:\n raise ValueError(\"Please upload relation support dataset!\")\n \n d.load_support_files(\"temp/relation_support_datasets\", request.user.username)\n if queries_type == \"csv_option\":\n if not os.path.exists(\"temp/queries.csv\"):\n raise ValueError(\"Please upload query CSV dataset!\")\n d.load_queries_csv(\"temp/queries.csv\")\n \n elif queries_type == \"url_option\":\n if not os.path.exists(\"temp/url.txt\"):\n raise ValueError(\"Please specify news article url!\")\n with open(\"temp/url.txt\") as f:\n url = f.read()\n d.load_url(url)\n \n elif queries_type == \"txt_option\":\n d.load_text_files(os.path.abspath(\"temp/text_files\"))\n \n elif queries_type == \"ind_sentence_option\":\n ind_sentence = request.POST.get('ind_sent')\n d.load_ind_sentence(ind_sentence)\n \n elif queries_type == 
\"html_option\":\n d.load_html_file_queries(os.path.abspath(\"temp/html_files\"))\n \n if entities_type == \"uploaded\":\n d.trim_queries_based_on_entities_file(os.path.abspath(\"temp/entities_csv_file.csv\"))\n\n if cancel_flag[0]:\n return\n d.detect(rt_results=results, cancel_flag=cancel_flag)\n if cancel_flag[0]:\n return\n src=None\n if queries_type == \"csv_option\":\n src = \"queries_csv\"\n elif queries_type == \"txt_option\":\n src = \"queries_text_file\"\n elif queries_type == \"ind_sentence_option\":\n src = \"ind_sentence\"\n elif queries_type == \"url_option\":\n with open(\"temp/url.txt\") as f:\n src = f.read()\n elif queries_type == \"html_option\":\n src = \"html_files\"\n \n s = Source(source=src, user=request.user)\n s.save()\n for r in results:\n er = ExtractedRelation(sentence=r['sentence'],head=r['head'],tail=r['tail'],pred_relation=r['pred_relation'],sentiment=r['sent'],conf=r['conf'],ckpt=ckpt, source=s)\n er.save()\n except Exception as e:\n print(len(str(e)))\n print(str(e))\n errors.append(str(e))\n tb = traceback.format_exc()\n print(tb)\n finally:\n currently_analyzing = False\n analysis_user = None", "def analyse(self, data=None):\n pass", "def __init__(self, database):\n self.max_axiom_arity = max([p.arity() for p in database.non_entails_axioms.itervalues()]) + 1 # one more than the max\n\n self.database = database\n\n self.max_unconstrained_arity = 10000000\n self.searcher = SearchProblem(database, max_unconstrained_arity = self.max_unconstrained_arity)\n\n self.tautologies = set()\n for p in self.database.propositions.itervalues():\n e_hyps = [h for h in p.hyps if h.type == 'e']\n if p.vclass=='|-' and len(e_hyps) == 0:\n self.tautologies.add(p.label)\n print 'tautologies:', len(self.tautologies)\n \n # the propositions with trivial unconstrained arity. 
That is, the ones\n # that are really easy to apply.\n self.constrained_propositions = set(\n p.label for p in self.database.propositions.itervalues()\n if p.vclass == '|-' and p.unconstrained_arity() == 0\n )\n \n # figure out the names of the read variables\n # self.real_wff_names = set()\n # self.real_set_names = set()\n # self.real_class_names = set()\n # real_name_dict = {'wff': self.real_wff_names, 'set': self.real_set_names, 'class': self.real_class_names}\n #\n # for p in self.database.propositions.itervalues():\n # for label in p.f:\n # vclass = p.f[label].vclass\n # real_name_dict[vclass].add(label)\n # print real_name_dict\n\n self.constructor_dictionary = [{} for _ in range(self.max_axiom_arity)]\n\n # we need to define some extra variables, which we'll randomly assign when we read in a statement\n # this is a reasonable amount of data augmentation.\n self.extra_wffs = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='wff']) for p in database.propositions.itervalues() )\n self.extra_classes = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='class']) for p in database.propositions.itervalues() )\n self.extra_sets = language_model_extra_variables_of_each_type+max(len([f for f in p.f.itervalues() if f.vclass=='set']) for p in database.propositions.itervalues() )\n\n # hand code these in.\n self.extra_sets = 20\n self.extra_wffs = 18\n self.extra_classes = 27\n\n self.wff_names = ['WFFVar'+str(i) for i in range(self.extra_wffs)]\n self.set_names = ['SetVar'+str(i) for i in range(self.extra_sets)]\n self.class_names = ['ClassVar'+str(i) for i in range(self.extra_classes)]\n\n self.num_extra_variable_names = len(self.wff_names)+len(self.set_names)+len(self.class_names)\n self.extra_variable_dict = {}\n\n # the names for the unconstrained variables\n #self.ua_names = ['UA'+str(i) for i in range(self.max_unconstrained_arity)]\n\n # add them to the dictionary\n arityzerodict = self.constructor_dictionary[0]\n for i in range(self.extra_wffs):\n arityzerodict['WFFVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['WFFVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_classes):\n arityzerodict['ClassVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['ClassVar'+str(i)]=len(self.extra_variable_dict)\n for i in range(self.extra_sets):\n arityzerodict['SetVar'+str(i)]=len(arityzerodict)\n self.extra_variable_dict['SetVar'+str(i)]=len(self.extra_variable_dict)\n # for i in range(len(self.ua_names)):\n # arityzerodict['UA'+str(i)]=len(arityzerodict)\n # self.extra_variable_dict['UA'+str(i)]=len(self.extra_variable_dict)\n\n # a block to create a dictionary that takes a symbol to its vclass\n self.symbol_to_vclass = {label:database.propositions[label].vclass for label in database.non_entails_axioms}\n for symbol in self.wff_names:\n self.symbol_to_vclass[symbol] = 'wff'\n for symbol in self.set_names:\n self.symbol_to_vclass[symbol] = 'set'\n for symbol in self.class_names:\n self.symbol_to_vclass[symbol] = 'class'\n\n # a list of all of the extra variables, for use later\n self.new_names = self.wff_names+self.set_names+self.class_names\n\n # describe the number of variables we've used\n print 'wff variables:',self.extra_wffs\n print 'class variables:',self.extra_classes\n print 'set variables:',self.extra_sets\n #print 'ua variables:', self.ua_names\n\n # now add the actual constructor axioms to our dictionary\n for p in database.non_entails_axioms.itervalues():\n c_dict 
= self.constructor_dictionary[p.arity()]\n c_dict[p.label] = len(c_dict)\n\n for i in range(self.max_axiom_arity):\n print len(self.constructor_dictionary[i]),'constructor axioms with arity',i\n\n # build a pair of dictionaries that convert (arity,num) to total_num\n # and vice versa. This is ugly. Whatever\n self.arity_num_to_global_index = {}\n self.global_index_to_arity_num=[]\n global_index = 0\n for arity in range(self.max_axiom_arity):\n for num in range(len(self.constructor_dictionary[arity])):\n self.global_index_to_arity_num.append((arity,num))\n self.arity_num_to_global_index[(arity,num)]=global_index\n global_index+=1\n\n \"\"\"sets up the data sets. We divide the propositions into training/validation/test and\n then compile the corresponding list of statements\"\"\"\n list_of_propositions = self.database.propositions_list[:] # database.propositions.values()\n np.random.seed(seed=121451345)\n list_of_propositions = np.random.permutation(list_of_propositions)\n\n num_validation = len(list_of_propositions)/10\n num_test = num_validation\n num_training = len(list_of_propositions)-num_test-num_validation\n self.training_propositions = list_of_propositions[:num_training]\n self.training_propositions = [_ for _ in self.training_propositions if _.type=='p']\n self.validation_propositions = list_of_propositions[num_training:num_training+num_validation]\n self.validation_propositions = [_ for _ in self.validation_propositions if _.type=='p']\n self.test_propositions = list_of_propositions[num_training+num_validation:]\n self.test_propositions = [_ for _ in self.test_propositions if _.type=='p']\n\n if self.database.remember_proof_steps:\n self.all_proof_steps = [] # except those that refer to e or f-type hypotheses\n for p in self.database.propositions.itervalues():\n self.all_proof_steps += [step for step in p.entails_proof_steps if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n\n self.training_proof_steps = []\n for p in self.training_propositions:\n self.training_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n self.validation_proof_steps = []\n for p in self.validation_propositions:\n self.validation_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n self.test_proof_steps = []\n for p in self.test_propositions:\n self.test_proof_steps += [step for step in p.entails_proof_steps\n if not (step.prop.type=='f' or step.prop.type == 'e')]\n\n print\n print 'training steps:', len(self.training_proof_steps)\n print 'validation steps:', len(self.validation_proof_steps)\n print 'test steps:', len(self.test_proof_steps)\n\n\n # figure out how frequenly each proposition is used\n self.prop_usage = [0 for p in self.database.propositions]\n for s in self.all_proof_steps:\n self.prop_usage[s.prop.number]+=1\n\n # figure out what the most difficult proof step is\n self.max_depth = max([s.height for s in self.all_proof_steps]) + 1\n print 'max proof step depth:', self.max_depth-1\n\n\n # figure out the number of times each proposition is used.\n # self.prop_uses = [0.1] * len(self.database.propositions) # for numberical stability\n # for step in self.all_proof_steps:\n # self.prop_uses[step.prop.number] += 1\n # self.initial_b = np.log(1.0*np.array(self.prop_uses)/sum(self.prop_uses))\n\n\n # build up a database of propositions by unconstrained arity\n # that is, total_unconstrained_arity is the total\n # of all of the unconstrained arities of all of the 
propositions.\n # and unconstrained_arity_indices is a list of p.unconstrained_arity()\n # unique indices for each proposition p.\n self.total_unconstrained_arity = 0\n self.unconstrained_arity_indices = {}\n self.unconstrained_label_to_number = {}\n for p in self.database.propositions_list: # in order of proposition number\n u_arity = p.unconstrained_arity()\n self.unconstrained_arity_indices[p.label]=range(self.total_unconstrained_arity, self.total_unconstrained_arity + u_arity)\n self.total_unconstrained_arity += u_arity\n self.unconstrained_label_to_number[p.label]=len(self.unconstrained_label_to_number)\n #self.max_unconstrained_arity = max([p.unconstrained_arity() for p in self.database.propositions.itervalues()])\n\n self.total_constructor_arity = 0\n self.constructor_arity_indices = {}\n self.constructor_label_to_number = {}\n self.constructor_labels = []\n for p in database.non_entails_axioms.itervalues():\n u_arity = p.arity()\n self.constructor_arity_indices[p.label]=range(self.total_constructor_arity, self.total_constructor_arity + u_arity)\n self.total_constructor_arity += u_arity\n self.constructor_label_to_number[p.label]=len(self.constructor_label_to_number)\n self.constructor_labels.append(p.label)\n for name in self.wff_names+self.set_names+self.class_names: #+self.ua_names:\n self.constructor_arity_indices[name] = [] # the extra arity 0 constructors\n self.constructor_label_to_number[name]=len(self.constructor_label_to_number)\n self.constructor_labels.append(name)\n\n # a lookup table for the index into all the propositions of the label\n self.label_to_number = {x.label:x.number for x in self.database.propositions.itervalues()}\n for x in self.new_names:\n self.label_to_number[x] = -1 # all variables should always be included", "def __init__(self):\n\n # Dictionary of types seen so far. 
Builtin types always available.\n # Values : list of constructors which the type defines\n # This is a smartdict, so keys can be retrieved.\n self.knownTypes = smartdict.Smartdict()\n for typecon in ast.builtin_types_map.values():\n self.knownTypes[typecon()] = None\n\n # Dictionary of constructors encountered so far.\n # Value: Type which the constructor produces.\n # This is a smartdict, so keys can be retrieved.\n self.knownConstructors = smartdict.Smartdict()", "def _init(self, *args, **kwargs):\n f = Framer()\n election = Election.objects.filter(electionid=self.this_election).first()\n for measure in self.list_of_measures:\n requested_url = \"%s%s\" % (self.api_url, measure)\n response = requests.get(requested_url, headers=self.request_headers)\n measure_data = response.json()[\"measure\"]\n identifying_information = measure_data[\"official_identifier\"].split(\" \")\n measure_data[\"official_identifier\"] = \"Proposition %s\" % (identifying_information[1])\n measure_data[\"official_identifier_slug\"] = f._slug(measure_data[\"official_identifier\"])\n measure_data[\"election_id\"] = election.id\n measure_data = f._massage_measure_title(measure_data)\n saver = Saver()\n saver.make_measure(measure_data)\n saver.make_measure_contributor(measure_data)\n saver.make_measure_total(measure_data)", "def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()", "def test_init_analysis(network):\n bf_init_analysis(\"test_analysis\", _stable_question_dir)", "def testGetAnalyzerInstances(self):\n analyzer_names = manager.AnalyzersManager.GetAnalyzerNames()\n analyzers = manager.AnalyzersManager.GetAnalyzerInstances(analyzer_names)\n self.assertEqual(len(analyzer_names), len(analyzers))\n for analyzer in analyzers:\n self.assertIsInstance(analyzer, interface.BaseAnalyzer)", "def make_all(self):\n # General matrices #\n self.tsv_seq_to_concepts()\n self.tsv_seq_to_names()\n self.list_sequence_concept()\n # Only in the with 'samples' case #\n if self.a.abundances: self.tsv_samples_to_names()\n if self.a.abundances: self.biom_output()\n # Graphical outputs #\n self.per_seq_dot_files()\n if self.a.abundances: self.per_sample_dot_files()", "def __init__(self):\n self.cause_texts = set()\n self.effect_texts = set()\n self.evidence_texts = set()\n self.cause_polarity = None\n self.effect_polarity = None\n self.cause_type = None\n self.effect_type = None", "def __init__(self, description):\n super(AimaProver, self).__init__()\n description = DistinctAndNotMover.run(description)\n self.knowledgeBase = KnowledgeBase(Sets.newHashSet(description))", "def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)", "def newAnalyzer():\n analyzer = {'accidents': None,\n 'dateIndex': None\n }\n\n analyzer['accidents'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='BST',\n comparefunction=compareDates)\n return analyzer", "def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)", "def analyze_run():\n file_datas_dict = load_datas(Args.data_files)\n plotables_dict = dict()\n for file_name, datas in 
file_datas_dict.viewitems():\n analized_datas = analyze_datas(datas,Args.analysis_attributes)\n plotables = ana_results_to_plotables(\n analized_datas,\n Args.analysis_attributes\n )\n if Args.dm_file_out:\n analysis_save_dm(\n analized_datas,\n plotables,\n Args.analysis_attributes,\n Args.dm_file_out\n )\n if Args.mat_file_out:\n analysis_save(\n plotables,\n Args.analysis_attributes,\n Args.mat_file_out\n )\n if Args.verbose:\n plotables_dict[file_name] = plotables\n if Args.verbose:\n ana_plot_figures(plotables_dict,Args.analysis_attributes)", "def __init__(self):\n\n #: Dict[str, Any]: Experiment metadata\n self.__experiment_metadata = None\n\n #: List[CurveData]: Processed experiment data set.\n self.__processed_data_set = list()\n\n #: Backend: backend object used for experimentation\n self.__backend = None\n\n # Add expected options to instance variable so that every method can access to.\n for key in self._default_options().__dict__:\n setattr(self, f\"__{key}\", None)\n\n # Add fixed parameters to instance variable so that every method can access to.\n for key in self.__fixed_parameters__:\n setattr(self, f\"__{key}\", None)", "def __init__(self):\n self.dataset_path = input('Enter the path to the root directory of your dataset:\\n')\n self.classes = [c.lower() for c in os.listdir(self.dataset_path)]\n self.year = str(datetime.datetime.now().year)\n self.kit_path = input(\"Enter the path ot your VOCdevkit directory:\\n\")\n self.annotation_path = self.kit_path + '/VOC' + self.year + '/Annotations'\n self.renamer = data_renamer.DataRenamer(self.dataset_path, self.year)\n self.data_splitter = data_splitter.DataSplitter(self.dataset_path, self.classes, self.year, self.kit_path)\n self.annotation_maker = annotation_maker.AnnotationMaker(self.dataset_path, self.kit_path, self.year,\n self.annotation_path)", "def get_analysis(analysis_info):\n return _get_analysis(_LOCAL_API_ENDPOINT,\n analysis_info[\"ecosystem\"],\n analysis_info[\"package\"],\n analysis_info[\"version\"])", "def __init__(self):\n\n self.current_path = os.getcwd()\n self.data_path = self.current_path + \"/data\"\n\n self.original_files = {}\n self.imitation_files = {}\n self.original_test_files = {}\n self.imitation_test_files = {}\n\n self.training_set = None\n self.original_test_set = None\n self.imitation_test_set = None\n\n self.accuracy = 0.\n self.threshold = 0.\n\n self.get_files()", "def __init__(self):\n\t\tself.parsed = False\n\t\tdir_path = os.path.dirname(os.path.realpath(__file__))\n\t\tself.xsdfilename = os.path.join(dir_path, 'xml', 'schema.xsd')\n\t\tself.schema = 'schema.xsd'\n\t\tself.predictors = []\n\t\tself.predictors_types = []\n\t\tself.preprocessing_methods = []", "def newAnalyzer():\n analyzer = {'events': None,\n \"musical_genre\":None,\n 'artist_ID': None,\n \"track_ID\": None,\n \"instrumentalness\": None,\n \"acousticness\": None,\n \"liveness\": None,\n \"speechiness\": None,\n \"energy\":None,\n \"danceability\": None,\n \"valence\": None\n }\n # Listas\n analyzer['events'] = lt.newList('ARRAY_LIST', compareIds)\n\n # RBT \n analyzer[\"instrumentalness\"] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['liveness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['energy'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['danceability'] = 
om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['valence'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['tempo'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['created_at'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['dates_u'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n \n\n # Tablas de Hash \n analyzer['artist_ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['track_ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer[\"musical_genre\"] = mp.newMap(15,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer[\"sen\"] = mp.newMap(15,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['track_ID_S'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n\n return analyzer", "def single_analysis(config, name):\n # graphviz = GephiOutput()\n graphviz = GraphvizOutput()\n graphviz.output_file = name\n\n print \"Preparing test case...\"\n radio, lines = _prepare_test_case()\n\n print \"Running test case...\"\n with PyCallGraph(output=graphviz, config=config):\n _run_test_case(radio, lines)", "def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()", "def _generate_trading_instances(self):\n print(\n \"Initizalization...\"\n )\n\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)", "def __init__(self):\n self._results = {}\n self._logs = {}", "def get_sentiment_analysis(sender, instance, **kwargs):\n text_analysis = TextAnalysis(instance.text)\n\n # Prevent sentiment_analysis API call every time the document is saved\n if instance.sentiment_analysis is None:\n instance.get_sentiment_analysis()", "async def _analyze(self) -> Report:\n forecasts = []\n daily = []\n hourly = []\n for data in self._series:\n model = await self._build_model(data=data)\n future = await self._forecast_single(model=model)\n h, d = await self._process_trends_single(future=future)\n forecasts.append(Timeseries.from_df(\n name=data.get_name() + '_forecast',\n df=future,\n time_col='ds',\n val_col='yhat',\n ))\n daily.append(d)\n hourly.append(h)\n daily_agg = pd.concat(daily).groupby(level=0).mean()\n hourly_agg = pd.concat(hourly).groupby(level=0).mean()\n return Report(\n forecasts=forecasts,\n daily_trend=Trend(trend_vals=daily_agg.to_dict()),\n hourly_trend=Trend(trend_vals=hourly_agg.to_dict()),\n )", "def __init__(self):\n self.machines = {}\n self.configs = {}\n self.systems = {}\n self.jobs = {}\n self.benchmarks = {}\n self.projects = {}", "def __init__(self, objectives, constraints, decisions):\n self.objectives = objectives\n self.constraints = constraints\n self.decisions = decisions", "def __init__(self, **kwargs):\n super(BaseAG, self).__init__()\n self.domains = None\n self._info = {}\n\n mutation_name = kwargs.get(\"mutation\", self._args.mutation)\n 
representation_name = kwargs.get(\"representation\",\n self._args.representation)\n crossover_name = kwargs.get(\"crossover\", self._args.crossover)\n selection_name = kwargs.get(\"selection\", self._args.selection)\n\n self._population = kwargs.get(\"population\", self._args.population)\n self._selection_crossover = kwargs.get(\"selection_crossover\",\n self._args.selection_crossover)\n self._selection_mutation = kwargs.get(\"selection_mutation\",\n self._args.selection_mutation)\n self._generations = kwargs.get(\"generations\",\n self._args.generations)\n dimension = kwargs.get(\"dimension\", self._args.dimensions)\n precision = kwargs.get(\"precision\", self._args.precision)\n\n crossovers = cros_factory.crossover_factory()\n mutations = mut_factory.mutation_factory()\n representations = repr_factory.representations_factory()\n selections = selection_factory.selection_factory()\n\n for item in crossovers:\n if crossover_name == item.name():\n self._crossover = item()\n for item in representations:\n if representation_name == item.name():\n # NOTE(mmicu): the dimension is know when we get the function\n # eliminate this requirement\n self._representation = item(dimension,\n precision)\n\n for item in mutations:\n if mutation_name == item.name():\n self._mutation = item()\n\n for item in selections:\n if selection_name == item.name():\n self._selection = item(self._representation)", "def my_main() -> None: # pragma: no cover\n universe = AutomataUniverse(SQUARE_GRID_NEIGHBORS, [2,3], [3])\n instance = AutomataTransforms(universe)\n assert isinstance(instance, AutomataTransforms)\n # # _is_rot_mat_test(instance)\n # # _rotations_check(instance)\n # # _prime_cells_check(instance)\n # _check_transform_test(instance)\n # # _hashable_transform_test(instance)\n # _duplicate_test(instance)\n # _collision_test(instance)\n # _end_cycle_test(instance)\n # _add_transform_test(instance)\n # instance.generate_combination_transforms()\n\n # # _matrix_rotate_test(instance)\n # # _duplicate_test(instance) # test again after transform(s) added\n # # _collision_test(instance) # test again after transform(s) added «also refactoring»\n # instance.dbg_report_instance() # DEBUG", "def async_analysis(kwargs):\n # we can't pickle our objects for remote works so we pickle the raw request\n # and then load it here.\n data = analysis_input_schema.load(kwargs).data\n return analysis(**data)", "def __init__(self, attributes=None):\n super().__init__(attributes)\n \n # processing parameters\n self.set = _Settings()\n\n # results storage\n self.measure_time = None # store here in case we average FIDs, filled by chain!\n self.frequency_shift = None\n self.phase_0 = None\n self.data = None\n \n if attributes is not None:\n self.inflate(attributes)\n\n self.chain = None", "def setup_class(self):\n self.iqcalc = iqcalc_astropy.IQCalc(logger=self.logger)\n self.fwhm_funcs = (self.iqcalc.calc_fwhm_gaussian,\n self.iqcalc.calc_fwhm_moffat,\n self.iqcalc.calc_fwhm_lorentz)\n self.answers = ((2.8551, 2.7732), # Gaussian\n (2.77949, 2.6735), # Moffat\n (1.9570, 1.8113) # Lorentz\n )", "def setUp(self):\n self.m = m = random.randint(1, 100)\n self.n = n = random.randint(1, 100)\n self.sig = sig = Signature(\"name\", Dim(\"m\"), Dim(\"n\"),\n sData(\"A\", \"ldA * n\"), Ld(\"ldA\", \"m\"),\n dData(\"B\", \"ldB * m\"), Ld(\"ldB\", \"m\"),\n cData(\"C\", \"ldC * n\"), Ld(\"ldC\", \"n\"))\n self.ex = ex = Experiment()\n ex.calls = [sig(m, n, \"X\", None, \"Y\", None, \"Z\", None)]\n ex.infer_lds()\n self.i = Symbol(\"i\")\n self.j = 
Symbol(\"j\")", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def __init__(self):\n self.RRTFamilySolver = RRTFamilyPathPlanner()\n self.PRMSolver = PRMPathPlanner()", "def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None", "def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}", "def __init__(self, num_detectors):\n self.dataset = []\n self.num_detectors = num_detectors\n self.add_detectors()", "def __init__(self):\n\n self.result = None # To store the result\n self.predictor = None # To store the fit predictor", "def init_analysis(session, args):\r\n path = os.path.join(session.abs_path,'{}_{}_{}_{}'.format(args[12],args[13],args[14],args[15]))\r\n session.case = Case(path, session.method)\r\n case = session.case\r\n if args[0].split('#')[0]=='R':\r\n args[7]= -args[7]\r\n\r\n case.file_U.set_field('internalField', 'uniform ({} {} 0)'.format(args[6], args[7]))\r\n\r\n 
case.file_U.set_field('boundaryField',\r\n {'inlet': {'type': 'freestream',\r\n 'freestreamValue': 'uniform ({} {} 0)'.format(args[6], args[7])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'fixedValue', 'value':'uniform (0 0 0)'},\r\n 'extrados': {'type': 'fixedValue', 'value':'uniform (0 0 0)'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n case.file_p.set_field('internalField', 'uniform {}'.format(args[10]))\r\n case.file_p.set_field('boundaryField',\r\n {'inlet': {'type': 'freestreamPressure'},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_T.set_field('internalField', 'uniform {}'.format(args[11]))\r\n session.case.file_T.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue', 'value': 'uniform {}'.format(args[11])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'slip'},\r\n 'extrados': {'type': 'slip'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n \"\"\"\r\n\r\n session.case.file_U.set_field('internalField', 'uniform ({} {} 0)'.format(args[6], args[7]))\r\n session.case.file_U.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue',\r\n 'value': 'uniform ({} {} 0)'.format(args[6], args[7])},\r\n 'outlet': {'type': 'inletOutlet','inletValue':'uniform ({} {} 0)'.format(args[6], args[7]),\r\n 'value':'uniform ({} {} 0)'.format(args[6], args[7])},\r\n 'intrados': {'type': 'noSlip'},\r\n 'extrados': {'type': 'noSlip'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_p.set_field('internalField', 'uniform {}'.format(args[10]))\r\n session.case.file_p.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue', 'value': 'uniform {}'.format(args[10])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_T.set_field('internalField', 'uniform {}'.format(args[11]))\r\n session.case.file_T.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue','value':'uniform {}'.format(args[11])},\r\n 'outlet': {'type': 'inletOutlet','inletValue':'uniform {}'.format(args[11]),'value':'uniform {}'.format(args[11])},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n \"\"\"\r\n \"\"\"\r\n session.case.file_U.set_field('internalField', 'uniform ({} {} 0)'.format(args[7], args[6]))\r\n session.case.file_U.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue',\r\n 
'value': 'uniform ({} {} 0)'.format(args[7], args[6])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'noSlip'},\r\n 'extrados': {'type': 'noSlip'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_p.set_field('internalField', 'uniform {}'.format(args[10]))\r\n session.case.file_p.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue', 'value': 'uniform {}'.format(args[10])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_T.set_field('internalField', 'uniform {}'.format(args[11]))\r\n session.case.file_T.set_field('boundaryField',\r\n {'inlet': {'type': 'zeroGradient'},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n \"\"\"\r\n \"\"\"\r\n session.case.file_U.set_field('internalField', 'uniform ({} {} 0)'.format(args[5], args[6]))\r\n session.case.file_U.set_field('boundaryField',\r\n {'inlet': {'type': 'fixedValue', 'value': 'uniform ({} {} 0)'.format(args[5], args[6])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'noSlip'},\r\n 'extrados': {'type': 'noSlip'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_p.set_field('boundaryField', {'inlet': {'type': 'fixedValue','value':'uniform {}'.format(args[9])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n session.case.file_p.set_field('internalField', 'uniform {}'.format(args[9]))\r\n\r\n session.case.file_T.set_field('boundaryField', {'inlet': {'type': 'fixedValue', 'value': 'uniform {}'.format(args[10])},\r\n 'outlet': {'type': 'zeroGradient'},\r\n 'intrados': {'type': 'zeroGradient'},\r\n 'extrados': {'type': 'zeroGradient'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n session.case.file_T.set_field('internalField','uniform 300')\r\n\r\n session.case.file_nut.set_field('boundaryField', {'inlet':{'type':'calculated', 'value':'uniform 0'},\r\n 'outlet':{'type':'calculated', 'value':'uniform 0'},\r\n 'intrados': {'type': 'nutkWallFunction', 'Cmu':'0.09', 'kappa':'0.41', 'E':'9.8', 'value':'uniform 0'},\r\n 'extrados': {'type': 'nutkWallFunction', 'Cmu':'0.09', 'kappa':'0.41', 'E':'9.8', 'value':'uniform 0'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': 
{'type': 'cyclic'}})\r\n\r\n session.case.file_k.set_field('internalField', 'uniform 1')\r\n session.case.file_k.set_field('boundaryField', {\r\n 'inlet': {'type': 'turbulentIntensityKineticEnergyInlet', 'intensity': '0.05', 'value': 'uniform 1'},\r\n 'outlet': {'type': 'inletOutlet', 'inletValue': 'uniform 1', 'value': 'uniform 1'},\r\n 'intrados': {'type': 'kqRWallFunction','value':'uniform 1'},\r\n 'extrados': {'type': 'kqRWallFunction','value':'uniform 1'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n\r\n session.case.file_epsilon.set_field('boundaryField', {'inlet': {'type': 'turbulentMixingLengthDissipationRateInlet', 'mixingLength': '0.005', 'value': 'uniform 200'},\r\n 'outlet': {'type': 'inletOutlet', 'inletValue': 'uniform 200', 'value': 'uniform 200'},\r\n 'intrados': {'type': 'epsilonWallFunction', 'Cmu':'0.09', 'kappa':'0.41', 'E':'9.8', 'value':'uniform 200'},\r\n 'extrados': {'type': 'epsilonWallFunction', 'Cmu':'0.09', 'kappa':'0.41', 'E':'9.8', 'value':'uniform 200'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n session.case.file_epsilon.set_field('internalField', 'uniform 200')\r\n\r\n session.case.file_alphat.set_field('boundaryField', {'inlet':{'type':'calculated', 'value':'uniform 0'},\r\n 'outlet':{'type':'calculated', 'value':'uniform 0'},\r\n 'intrados': {'type': 'compressible::alphatWallFunction', 'Prt':'0.85', 'value':'uniform 0'},\r\n 'extrados': {'type': 'compressible::alphatWallFunction', 'Prt':'0.85', 'value':'uniform 0'},\r\n 'top_down': {'type': 'empty'},\r\n 'cyclic_in_1': {'type': 'cyclic'},\r\n 'cyclic_in_2': {'type': 'cyclic'},\r\n 'cyclic_out_1': {'type': 'cyclic'},\r\n 'cyclic_out_2': {'type': 'cyclic'}})\r\n \"\"\"\r\n session.case.file_controlDict.set_field('endTime', '10000')\r\n session.case.file_controlDict.set_field('startFrom', 'latestTime')\r\n session.case.file_controlDict.set_field('functions', {\"#includeFunc\":\"MachNo\"})\r\n session.case.file_turbulenceProperties.set_field('simulationType', 'laminar')\r\n session.case.interacting(100)\r\n sim = session.case.simulation(\"open40\") # Build files\r\n sim.limit_write = 50\r\n sim.block_mesh(string=write_block_mesh(args[1], args[2], args[3], args[4], args[5], session.mesh))\r\n sim.check_mesh()\r\n\r\n result_dict={\"T\": 0, \"p\":0, \"Theta\":0, \"z\":0, \"profile\":args[14]}\r\n\r\n def _function(container, args):\r\n current_time = container['current_time']\r\n if float(current_time)>=0.000015:\r\n print('Parsing results')\r\n sim.foamToVTK()\r\n results = sim.get_last_results('outlet')\r\n result_U = results.GetCellData('U')\r\n result_p = results.GetCellData('p')\r\n result_T = results.GetCellData('T')\r\n theta = 0.0\r\n z = 0.0\r\n p=0.0\r\n t=0.0\r\n U_length = len(result_U)\r\n p_length = len(result_p)\r\n t_length = len(result_T)\r\n for i,j,k in zip(result_p, result_T, result_U):\r\n p+= float(i[0])/p_length\r\n t+= float(j[0])/t_length\r\n theta += float(k[1])/U_length\r\n z += float(k[0])/U_length\r\n\r\n args[\"T\"] = t\r\n args[\"p\"] = p\r\n args[\"Theta\"] = theta\r\n args[\"z\"] = z\r\n return True\r\n return False\r\n \r\n #sim.run(_function, result_dict)\r\n #result_dict = {'T': 195.38959999999997, 'z': 429.3120571428572, 'p': 74001.90285714286, 'Theta': 
-207.19442857142855, 'profile': 0}\r\n\r\n print('Sending results')\r\n if args[0].split('#')[0]=='R':\r\n result_dict['Theta']= -result_dict['Theta']\r\n\r\n session.socket_design.send({'new_data':result_dict})", "def __init__(self, logging=True):\n self.matrix_creator = MatrixCreator()\n self.matrix_computer = MatrixComputer()\n self.equation_parser = EquationParser()\n self.balancing_validator = BalancingValidator(logging=logging)\n self.logger = Logger(active=logging)", "def __init__(self):\n self._names = []\n self._forwardFactories = []\n self._inputs = []\n self._inputFilters = {}\n self._outputFilters = []\n self._inputCheckers = []\n pass", "def __init__(self, model = None, cso = None, fast_classification = True, paper = None):\n self.cso = cso #Stores the CSO Ontology\n self.paper = paper #Paper to analyse\n self.model = model #contains the cached model\n self.min_similarity = 0.90 #Initialises the min_similarity\n self.fast_classification = fast_classification # if will use the full model or not\n self.explanation = dict()", "def __init__(self, language, dataset_name):\n self._language = language\n self._dataset_name = dataset_name\n\n # TODO: Maybe the paths should be passed as parameters or read from a configuration file.\n self._trainset_path = \"data/raw/{}/{}_Train.tsv\".format(language.lower(), dataset_name)\n self._devset_path = \"data/raw/{}/{}_Dev.tsv\".format(language.lower(), dataset_name)\n self._testset_path = \"data/raw/{}/{}_Test.tsv\".format(language.lower(), dataset_name)\n\n self._trainset = None\n self._devset = None\n self._testset = None\n\n \"\"\"spaCy object handling\"\"\"\n if self._language == \"english\":\n self.nlp = spacy.load('en_core_web_lg')\n elif self._language == \"spanish\":\n self.nlp = spacy.load(\"es_core_news_md\")\n elif self._language == \"german\":\n self.nlp = spacy.load('de_core_news_sm')\n elif self._language == \"french\":\n self.nlp = spacy.load('fr_core_news_md')\n\n self._trainset_spacy_path = \"data/interim/{}/{}_Train-spacy-objs.pkl\".format(\n language.lower(), dataset_name)\n self._devset_spacy_path = \"data/interim/{}/{}_Dev-spacy-objs.pkl\".format(\n language.lower(), dataset_name)\n self._testset_spacy_path = \"data/interim/{}/{}_Test-spacy-objs.pkl\".format(\n language.lower(), dataset_name)", "def __init__(self):\n super(Modules, self).__init__()\n \n global superclasses\n superclasses['universe'] = []\n superclasses['actions'] = ['universe']\n superclasses['booleans'] = ['universe']\n\n global instances\n instances['universe'] = set()\n instances['actions'] = set()\n instances['booleans'] = set()", "def __init__(self):\n \n #\n # imports\n #\n import time\n from socket import gethostname\n import commands\n \n #\n # Declare variables and set standard default values\n #\n self.database = None\n self.analysisPath = None\n self.command = None\n self.commandLine = None\n self.commandLineList = None\n SEAseqPipeLine.settings = Settings()\n SEAseqPipeLine.logfile = None\n SEAseqPipeLine.startTime = time.time()\n SEAseqPipeLine.startTimeStr = time.strftime(\"%A, %d %b %Y %H:%M:%S\",time.localtime())\n self.availableCommands = {\n 'initiateAnalysis':self.initiateAnalysis,\n 'addData':self.addData,\n 'changeSettings':self.changeSettings,\n 'startAnalysis':self.startAnalysis,\n 'commandLog':self.commandLog,\n 'help':self.printHelp,\n }\n if gethostname().split('.')[1] == 'uppmax': self.onUppmax = True\n else: self.onUppmax = False\n tempFolderName = 'SEAseq2temporaryFiles'\n if self.onUppmax: self.tempFileFolder = 
os.path.abspath(commands.getoutput('echo $SNIC_TMP'))+'/'+tempFolderName\n else: self.tempFileFolder = self.analysisPath+'/'+tempFolderName\n if not os.path.isdir(self.tempFileFolder): os.makedirs(self.tempFileFolder)\n \n #\n # Get information from commandline\n #\n self.getComandAndPath()\n self.doCurrentTask()", "def analyses(self, analysis_id=None, count=None, offset=None, format_group_name=None):\n if analysis_id:\n logger.debug(\"Get analysis\")\n return self._raw_api.analyses.get(analysis_id)\n\n logger.debug(\"Get analysis list\")\n data = filter_data(\n count=count,\n offset=offset,\n format_group_name=format_group_name\n )\n return self._raw_api.analyses.get(json=data)", "def __init__(self):\n this = _libsbml.new_RDFAnnotationParser()\n try: self.this.append(this)\n except: self.this = this", "def test_creation(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n assert analyzer.__dict__ == {\n \"name\": \"analyzer_sample\",\n \"type\": \"text\",\n \"features\": [\"frequency\", \"norm\", \"position\"],\n \"locale\": \"en\",\n \"case\": \"lower\",\n \"stopwords\": [\"stop\", \"word\"],\n \"accent\": False,\n \"stemming\": True,\n \"edge_ngram\": None,\n \"delimiter\": \",\",\n \"min\": 2,\n \"max\": 5,\n \"preserve_original\": False,\n \"start_marker\": \"\",\n \"end_marker\": \"\",\n \"stem_type\": \"binary\",\n }\n assert isinstance(db.analyzer(\"analyzer_sample\"), dict)", "def __init__(self, datapath, fileType='TargetLynx', sop='Generic', **kwargs):\n\n super().__init__(sop=sop, **kwargs)\n self.filePath, fileName = os.path.split(datapath)\n self.fileName, fileExtension = os.path.splitext(fileName)\n\n self.name = self.fileName\n\n # Load files and match data, calibration report and SOP, then Apply the limits of quantification\n if fileType == 'TargetLynx':\n # Read files, filter calibration samples, filter IS, applyLLOQ, clean object\n self._loadTargetLynxDataset(datapath, **kwargs)\n # Finalise object\n self.VariableType = VariableType.Discrete\n self.AnalyticalPlatform = AnalyticalPlatform.MS\n self.initialiseMasks()\n elif fileType == 'Bruker Quantification':\n # Read files, clean object\n self._loadBrukerXMLDataset(datapath, **kwargs)\n # Finalise object\n self.VariableType = VariableType.Discrete\n self.AnalyticalPlatform = AnalyticalPlatform.NMR\n self.initialiseMasks()\n elif fileType == 'empty':\n # Build empty object for testing\n pass\n else:\n raise NotImplementedError\n\n # Check the final object is valid and log\n if fileType != 'empty':\n validDataset = self.validateObject(verbose=False, raiseError=False, raiseWarning=False)\n if not validDataset['BasicTargetedDataset']:\n raise ValueError('Import Error: The imported dataset does not satisfy to the Basic TargetedDataset definition')\n self.Attributes['Log'].append([datetime.now(),\n '%s instance initiated, with %d samples, %d features, from %s'\n % (self.__class__.__name__, self.noSamples, self.noFeatures, datapath)])\n # Check later\n if 'Metadata Available' not in self.sampleMetadata:\n self.sampleMetadata['Metadata Available'] = False", "def __init__(self):\n self.parser_model_dir = None\n self.parser_options = {}\n self.reranker_model = None\n self.unified_model_dir = None" ]
[ "0.6476997", "0.64702034", "0.63428855", "0.62444305", "0.623065", "0.5992507", "0.59902406", "0.59565115", "0.5947113", "0.5947113", "0.59458697", "0.5939853", "0.59148765", "0.58706826", "0.5858152", "0.58355874", "0.58180684", "0.5758296", "0.5750707", "0.57501674", "0.57413834", "0.5738538", "0.5715878", "0.56486434", "0.5614197", "0.5611831", "0.5589196", "0.55858403", "0.55789053", "0.5572369", "0.55642426", "0.5562384", "0.5529692", "0.55241185", "0.5520147", "0.5517567", "0.55152875", "0.5515282", "0.5508454", "0.5488664", "0.5477658", "0.5477041", "0.5473829", "0.5470642", "0.5453264", "0.5451085", "0.5435981", "0.5417105", "0.54141426", "0.5412175", "0.54075485", "0.5393213", "0.53888947", "0.53853875", "0.5383172", "0.53604674", "0.53519857", "0.5351758", "0.5338383", "0.5327076", "0.5317966", "0.5309994", "0.53022146", "0.52984214", "0.5295567", "0.52852947", "0.52825624", "0.5281911", "0.5277759", "0.5266383", "0.5265841", "0.52640754", "0.5257535", "0.5256249", "0.52534366", "0.5253423", "0.52512443", "0.5248757", "0.5248619", "0.5241286", "0.52372897", "0.52367264", "0.5232069", "0.5229489", "0.52280605", "0.52177083", "0.5217616", "0.52146685", "0.5211114", "0.52062017", "0.5203665", "0.5203595", "0.51946443", "0.51946187", "0.5191996", "0.5185701", "0.51832896", "0.51790726", "0.51783323", "0.51773155" ]
0.60086685
5
Generates the list of required analyzers.
def get_analysis_list(self):
    analysys_list = []
    analysis_types = AnalysisPopulator.get_query_and_evaluation_analysis_types(self.parameters)
    for analysis_type in analysis_types:
        if analysis_type in self.all_possible_analysis:
            analysys_list.append(self.all_possible_analysis[analysis_type])
        else:
            print "[WARNING]", analysis_type, "is not an allowed analysis type"
    return analysys_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_analyzers(args: argparse.Namespace):\n first = True\n queue = [tuple(c) + (\"lookout.\",) for c in pkgutil.iter_modules(lookout.__path__)]\n while queue:\n importer, name, ispkg, prefix = queue.pop(0)\n\n if not ispkg or name == \"core\":\n continue\n\n m = importer.find_module(name).load_module(name)\n if getattr(m, \"__meta__\", False):\n queue.extend(tuple(c) + (prefix + name + \".\",)\n for c in pkgutil.iter_modules(m.__path__))\n continue\n\n try:\n cls = m.analyzer_class\n except AttributeError:\n continue\n if first:\n first = False\n else:\n print()\n print(prefix + name)\n print(\"\\t%s\" % cls.version)\n print(\"\\t\" + cls.description)", "def get_analyzers(cls):\n for analyzer_name, analyzer_class in iter(cls._class_registry.items()):\n yield analyzer_name, analyzer_class", "def main(args):\n\n # If the given output format is not 'table', redirect logger's output to\n # the stderr.\n logger.setup_logger(args.verbose if 'verbose' in args else None,\n None if args.output_format == 'table' else 'stderr')\n\n context = analyzer_context.get_context()\n working_analyzers, errored = analyzer_types.check_supported_analyzers(\n args.analyzers,\n context)\n analyzer_types.check_available_analyzers(working_analyzers, errored)\n\n analyzer_environment = env.extend(context.path_env_extra,\n context.ld_lib_path_extra)\n\n analyzer_config_map = analyzer_types.build_config_handlers(\n args, context, working_analyzers)\n\n def uglify(text):\n \"\"\"\n csv and json format output contain this non human readable header\n string: no CamelCase and no space.\n \"\"\"\n return text.lower().replace(' ', '_')\n\n def match_guideline(checker_name, selected_guidelines):\n \"\"\"\n Returns True if checker_name gives reports related to any of the\n selected guideline rule.\n checker_name -- A full checker name.\n selected_guidelines -- A list of guideline names or guideline rule IDs.\n \"\"\"\n guideline = context.guideline_map.get(checker_name, {})\n guideline_set = set(guideline)\n for value in guideline.values():\n guideline_set |= set(value)\n\n return any(g in guideline_set for g in selected_guidelines)\n\n def format_guideline(guideline):\n \"\"\"\n Convert guideline rules to human-readable format.\n guideline -- Dictionary in the following format:\n {\"guideline_1\": [\"rule_1\", \"rule_2\"]}\n \"\"\"\n return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))\n for g, r in guideline.items())\n\n # List available checker profiles.\n if 'profile' in args and args.profile == 'list':\n if 'details' in args:\n header = ['Profile name', 'Description']\n rows = context.profile_map.available_profiles().items()\n else:\n header = ['Profile name']\n rows = [(key, \"\") for key in\n context.profile_map.available_profiles()]\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n print(twodim.to_str(args.output_format, header, rows))\n return\n\n # List checker config options.\n if 'checker_config' in args:\n if 'details' in args:\n header = ['Option', 'Description']\n else:\n header = ['Option']\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n rows = []\n analyzer_failures = []\n for analyzer in working_analyzers:\n config_handler = analyzer_config_map.get(analyzer)\n analyzer_class = analyzer_types.supported_analyzers[analyzer]\n\n configs = analyzer_class.get_checker_config(config_handler,\n analyzer_environment)\n if not configs:\n analyzer_failures.append(analyzer)\n continue\n\n rows.extend((':'.join((analyzer, c[0])), 
c[1]) if 'details' in args\n else (':'.join((analyzer, c[0])),) for c in configs)\n\n if rows:\n print(twodim.to_str(args.output_format, header, rows))\n\n if analyzer_failures:\n LOG.error(\"Failed to get checker configuration options for '%s' \"\n \"analyzer(s)! Please try to upgrade your analyzer \"\n \"version to use this feature.\",\n ', '.join(analyzer_failures))\n sys.exit(1)\n\n return\n\n if args.guideline is not None and len(args.guideline) == 0:\n result = defaultdict(set)\n\n for _, guidelines in context.guideline_map.items():\n for guideline, rules in guidelines.items():\n result[guideline] |= set(rules)\n\n header = ['Guideline', 'Rules']\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n if args.output_format == 'json':\n rows = [(g, sorted(list(r))) for g, r in result.items()]\n else:\n rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]\n\n if args.output_format == 'rows':\n for row in rows:\n print('Guideline: {}'.format(row[0]))\n print('Rules: {}'.format(row[1]))\n else:\n print(twodim.to_str(args.output_format, header, rows))\n return\n\n # List available checkers.\n if 'details' in args:\n header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',\n 'Description']\n else:\n header = ['Name']\n\n if args.output_format in ['csv', 'json']:\n header = list(map(uglify, header))\n\n rows = []\n for analyzer in working_analyzers:\n config_handler = analyzer_config_map.get(analyzer)\n analyzer_class = analyzer_types.supported_analyzers[analyzer]\n\n checkers = analyzer_class.get_analyzer_checkers(config_handler,\n analyzer_environment)\n\n profile_checkers = []\n if 'profile' in args:\n if args.profile not in context.profile_map.available_profiles():\n LOG.error(\"Checker profile '%s' does not exist!\",\n args.profile)\n LOG.error(\"To list available profiles, use '--profile list'.\")\n sys.exit(1)\n\n profile_checkers = [('profile:' + args.profile, True)]\n\n config_handler.initialize_checkers(context,\n checkers,\n profile_checkers)\n\n for checker_name, value in config_handler.checks().items():\n state, description = value\n\n if state != CheckerState.enabled and 'profile' in args:\n continue\n\n if state == CheckerState.enabled and 'only_disabled' in args:\n continue\n elif state != CheckerState.enabled and 'only_enabled' in args:\n continue\n\n if args.output_format == 'json':\n state = state == CheckerState.enabled\n else:\n state = '+' if state == CheckerState.enabled else '-'\n\n if args.guideline is not None:\n if not match_guideline(checker_name, args.guideline):\n continue\n\n if 'details' in args:\n severity = context.severity_map.get(checker_name)\n guideline = context.guideline_map.get(checker_name, {})\n if args.output_format != 'json':\n guideline = format_guideline(guideline)\n rows.append([state, checker_name, analyzer,\n severity, guideline, description])\n else:\n rows.append([checker_name])\n\n if 'show_warnings' in args:\n severity = context.severity_map.get('clang-diagnostic-')\n for warning in get_warnings(analyzer_environment):\n warning = 'clang-diagnostic-' + warning\n\n if args.guideline is not None:\n if not match_guideline(warning, args.guideline):\n continue\n\n guideline = context.guideline_map.get(warning, {})\n if args.output_format != 'json':\n guideline = format_guideline(guideline)\n\n if 'details' in args:\n rows.append(['', warning, '-', severity, guideline, '-'])\n else:\n rows.append([warning])\n\n if rows:\n print(twodim.to_str(args.output_format, header, rows))\n\n 
analyzer_types.print_unsupported_analyzers(errored)", "def __init__(self, analyzers: Iterable[Type[Analyzer]], model_repository: ModelRepository,\n data_service: DataService):\n self._model_repository = model_repository\n analyzers = [(a.__name__, a) for a in analyzers]\n analyzers.sort()\n self._analyzers = [a[1] for a in analyzers]\n self._data_service = data_service", "def test_get_analyzers(self):\n analyzers = manager.AnalysisManager.get_analyzers()\n analyzer_list = [x for x in analyzers]\n first_analyzer_tuple = analyzer_list[0]\n analyzer_name, analyzer_class = first_analyzer_tuple\n self.assertIsInstance(analyzer_list, list)\n self.assertIsInstance(first_analyzer_tuple, tuple)\n self.assertEqual(analyzer_class, MockAnalyzer)\n self.assertEqual(analyzer_name, 'mockanalyzer')", "def initAnalyzer():\n return controller.initAnalyzer()", "def check_for_analyzers(self):\n executed = self.analyzer_state.executed_analyzers()\n for analyzer in executed:\n # check for wish\n if self.analyzer_state.check_wish(analyzer, 'cancel'):\n print(\"validator: cancelled {} upon request\".format(analyzer['_id']))\n continue\n\n print(\"validating and committing {}\".format(analyzer['_id']))\n\n self.analyzer_state.transition(analyzer['_id'], 'executed', 'validating')\n\n exe_res = analyzer['execution_result']\n temporary_coll = self.cc.temporary_db[exe_res['temporary_coll']]\n\n if exe_res['timespans'] is not None and exe_res['upload_ids'] is not None:\n self.analyzer_state.transition_to_error(analyzer['_id'],\n 'internal error: either timespans or upload_ids can have a '\n 'value but not both. I cannot decide if direct or normal analyzer')\n continue\n\n if exe_res['timespans'] is None and exe_res['upload_ids'] is None:\n self.analyzer_state.transition_to_error(analyzer['_id'],\n 'internal error: it\\'s not allowed to have both timespans and upload_ids to be None. '\n 'I cannot decide if direct or normal analyzer')\n continue\n\n if exe_res['upload_ids'] is not None:\n print(\"using direct commit\")\n valid_count, errors, action_id = commit_direct(\n analyzer['_id'], analyzer['working_dir'], self._action_id_creator,\n exe_res['upload_ids'], exe_res['max_action_id'], temporary_coll,\n self.cc.observations_coll, analyzer['output_types'],\n self.cc.action_log)\n else:\n print(\"using normal commit\")\n valid_count, errors, action_id = commit_normal(\n analyzer['_id'], analyzer['working_dir'], self._action_id_creator,\n exe_res['timespans'], exe_res['max_action_id'], temporary_coll,\n self.cc.observations_coll, analyzer['output_types'],\n self.cc.action_log)\n\n if len(errors) > 0:\n print(\"analyzer {} with action id {} has at least {} valid records but {} have problems:\".format(analyzer['_id'], action_id, valid_count, len(errors)))\n for idx, error in enumerate(errors):\n print(\"{}: {}\".format(idx, error))\n\n self.analyzer_state.transition_to_error(analyzer['_id'], 'error when executing validator:\\n' + '\\n'.join((str(error) for error in errors)))\n else:\n print(\"successfully commited analyzer {} run with action id {}. 
{} records inserted\".format(analyzer['_id'], action_id, valid_count))\n self.analyzer_state.transition(analyzer['_id'], 'validating', 'sensing', {'action_id': action_id})", "def newAnalyzer():\n analyzer = {'crimes': None,\n 'dateIndex': None,\n 'autors': None,\n 'instrumentalness': None,\n 'tempo':None,\n 'liveness':None,\n 'speechiness':None,\n 'danceability':None,\n 'valence':None,\n 'loudness':None,\n 'acousticness':None,\n 'energy':None,\n 'generos':None\n }\n\n analyzer['crimes'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['ids'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n\n analyzer['autors'] = om.newMap(omaptype='RBT',\n comparefunction=compareAUTOR)\n\n analyzer['instrumentalness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['tempo'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['liveness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['danceability'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n analyzer['valence'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['loudness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['energy'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n\n analyzer['generos']= m.newMap(11,\n maptype='CHAINING',\n loadfactor=4.0)\n \n return analyzer", "def create_analysis_tools(self):\r\n raise NotImplementedError()", "def create_phases(inputs):\n feed_tensors = inputs.values()\n\n remaining_analyzers = tf.get_collection(analyzers.ANALYZER_COLLECTION)\n analyzer_output_ready = {}\n for analyzer in remaining_analyzers:\n for tensor in analyzer.outputs:\n analyzer_output_ready[tensor] = False\n\n # Construct `AnalyzerInfo`s, removing any tensors that are analyzer outputs\n # from the ASSET_FILEPATHS collection. These tensors will be replaced and\n # the replacements will be added to the ASSET_FILEPATHS. 
Setting\n # AnalyzerOutputInfo.is_asset instructs the implementation to do this.\n asset_filepaths_collection = tf.get_collection_ref(\n tf.GraphKeys.ASSET_FILEPATHS)\n asset_filepaths = collections.OrderedDict(\n (tensor, True)\n for tensor in tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))\n\n phases = []\n while remaining_analyzers:\n analyzer_inputs = []\n for analyzer in remaining_analyzers:\n analyzer_inputs.extend(analyzer.inputs)\n ready_init_ops, ready_analyzer_inputs = (\n graph_tools.determine_ready_tensors_and_table_initializers(\n tf.get_default_graph(), analyzer_inputs, feed_tensors,\n analyzer_output_ready))\n ready_analyzer_inputs = set(ready_analyzer_inputs)\n\n new_remaining_analyzers = []\n analyzer_infos = []\n for analyzer in remaining_analyzers:\n if all(tensor in ready_analyzer_inputs for tensor in analyzer.inputs):\n input_tensor_names = [tensor.name for tensor in analyzer.inputs]\n output_infos = [\n AnalyzerOutputInfo(tensor.name, asset_filepaths.pop(tensor, False))\n for tensor in analyzer.outputs]\n analyzer_infos.append(AnalyzerInfo(\n input_tensor_names, analyzer.attributes, output_infos))\n\n for tensor in analyzer.outputs:\n analyzer_output_ready[tensor] = True\n else:\n new_remaining_analyzers.append(analyzer)\n phases.append(Phase(analyzer_infos, ready_init_ops))\n\n assert len(new_remaining_analyzers) < len(remaining_analyzers)\n remaining_analyzers = new_remaining_analyzers\n\n del asset_filepaths_collection[:]\n asset_filepaths_collection.extend(six.iterkeys(asset_filepaths))\n\n return phases", "def newAnalyzer():\n analyzer = {'accidents': None,\n 'dateIndex': None,\n 'timeIndex': None\n }\n\n analyzer['accidents'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['timeIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareTimes)\n \n return analyzer", "def testGetAnalyzerInstances(self):\n analyzer_names = manager.AnalyzersManager.GetAnalyzerNames()\n analyzers = manager.AnalyzersManager.GetAnalyzerInstances(analyzer_names)\n self.assertEqual(len(analyzer_names), len(analyzers))\n for analyzer in analyzers:\n self.assertIsInstance(analyzer, interface.BaseAnalyzer)", "def newAnalyzer():\n analyzer = {'accidents': None,\n 'dateIndex': None\n }\n\n analyzer['accidents'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='BST',\n comparefunction=compareDates)\n return analyzer", "def add_analyzer_arg(parser):\n parser.add(\"analyzer\", nargs=\"+\",\n help=\"Fully qualified package name with an analyzer. 
Current directory is \"\n \"included into PYTHONPATH.\")", "def _build_analyzer():\n analyzer = {}\n for e in LOSSLESS:\n analyzer[e] = lambda d, _ext, name: d.lossless.append(name)\n for e in COMPRESSED:\n analyzer[e] = lambda d, _ext, name: d.compressed.append(name)\n for e in IMAGES:\n analyzer[e] = lambda d, _ext, name: d.images.append(name)\n for e in VIDEOS:\n analyzer[e] = lambda d, _ext, name: d.videos.append(name)\n\n def _increment_ignored(d, _ext, _name):\n d.ignored += 1 # Can't use assignment in lambda\n\n for e in IGNORE:\n analyzer[e] = _increment_ignored\n analyzer['cue'] = lambda d, _, name: d.cue.append(name)\n\n return analyzer", "def newAnalyzer():\n analyzer = {'accidentes': None,\n 'dateIndex': None\n }\n\n analyzer['accidentes'] = lt.newList('SINGLE_LINKED', compareseverity)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n return analyzer", "def _generate_index_analysis(self, query_analysis, indexes):\n needs_recommendation = True\n full_indexes = []\n partial_indexes = []\n coverage = \"unknown\"\n\n if indexes is not None:\n for index_key in indexes.keys():\n index = indexes[index_key]\n index_report = self._generate_index_report(index,\n query_analysis)\n if index_report['supported'] is True:\n if index_report['coverage'] == 'full':\n full_indexes.append(index_report)\n if index_report['idealOrder']:\n needs_recommendation = False\n elif index_report['coverage'] == 'partial':\n partial_indexes.append(index_report)\n\n if len(full_indexes) > 0:\n coverage = \"full\"\n elif (len(partial_indexes)) > 0:\n coverage = \"partial\"\n elif query_analysis['supported']:\n coverage = \"none\"\n\n # INDEX ANALYSIS\n return OrderedDict([('indexStatus', coverage),\n ('fullIndexes', full_indexes),\n ('partialIndexes', partial_indexes)])", "def run_analyzers(args: argparse.Namespace):\n log = logging.getLogger(\"run\")\n model_repository = create_model_repo_from_args(args)\n log.info(\"Created %s\", model_repository)\n if args.request_server == \"auto\":\n data_request_address = \"%s:10301\" % args.server.split(\":\")[0]\n else:\n data_request_address = args.request_server\n data_service = DataService(data_request_address)\n log.info(\"Created %s\", data_service)\n sys.path.append(os.getcwd())\n manager = AnalyzerManager(\n analyzers=[importlib.import_module(a).analyzer_class for a in args.analyzer],\n model_repository=model_repository,\n data_service=data_service,\n )\n sys.path = sys.path[:-1]\n log.info(\"Created %s\", manager)\n listener = EventListener(address=args.server, handlers=manager, n_workers=args.workers)\n log.info(\"Created %s\", listener)\n listener.start()\n log.info(\"Listening %s\", args.server)\n listener.block()\n model_repository.shutdown()\n data_service.shutdown()", "def _generate_index_analysis(self, query_analysis, indexes):\r\n needs_recommendation = True\r\n full_indexes = []\r\n partial_indexes = []\r\n coverage = \"unknown\"\r\n\r\n if indexes is not None:\r\n for index_key in indexes.keys():\r\n index = indexes[index_key]\r\n index_report = self._generate_index_report(index,\r\n query_analysis)\r\n if index_report['supported'] is True:\r\n if index_report['coverage'] == 'full':\r\n full_indexes.append(index_report)\r\n if index_report['idealOrder']:\r\n needs_recommendation = False\r\n elif index_report['coverage'] == 'partial':\r\n partial_indexes.append(index_report)\r\n\r\n if len(full_indexes) > 0:\r\n coverage = \"full\"\r\n elif (len(partial_indexes)) > 0:\r\n coverage = \"partial\"\r\n elif 
query_analysis['supported']:\r\n coverage = \"none\"\r\n\r\n # INDEX ANALYSIS\r\n return OrderedDict([('indexStatus', coverage),\r\n ('fullIndexes', full_indexes),\r\n ('partialIndexes', partial_indexes)])", "def build_analyzer(self):\n\t\tanalyser = super(TfidfVectorizer, self).build_analyzer()\n\t\treturn lambda doc: (lemmatizer.lemmatize(w) for w in analyser(doc))", "def get_learners(beam_width, min_covered_examples, max_rule_length):\n ordered_entropy = Orange.classification.rules.CN2Learner()\n\n unordered = Orange.classification.rules.CN2UnorderedLearner()\n\n laplace = Orange.classification.rules.CN2Learner()\n laplace.rule_finder.quality_evaluator = Orange.classification.rules.LaplaceAccuracyEvaluator()\n\n learners = [ordered_entropy, unordered, laplace]\n labels = [\"ordered\", \"unordered\", \"laplace\"]\n\n for learner in learners:\n \"\"\"\n Parametrize the learners\n \"\"\"\n learner.rule_finder.search_algorithm.beam_width = beam_width\n learner.rule_finder.general_validator.min_covered_examples = min_covered_examples\n learner.rule_finder.general_validator.max_rule_length = max_rule_length\n\n return learners, labels", "def newAnalyzer():\n analyzer = {'tracks': None,\n 'songs': None,\n 'artists': None,\n 'char': None,\n 'char2': None,\n 'char3': None,\n }\n\n analyzer['tracks'] = lt.newList('SINGLE_LINKED', compareIds)\n analyzer['songs'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['artists'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char2'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char3'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n analyzer['char4'] = mp.newMap(numelements=50,\n maptype='PROBING',\n comparefunction=compareValue)\n \n return analyzer", "def required_tool_results():\n return [MethylResultModule]", "def generate(env, daos_prefix, comp_prefix, args):\n analyzer = Analyzer(env, daos_prefix, comp_prefix, args)\n analyzer.analyze_on_exit()", "def list_resolver_rules(MaxResults=None, NextToken=None, Filters=None):\n pass", "def handle(self, *args, **options):\n chains = sorted(plugin.BY_REQUIREMENTS.keys())\n for chain in chains:\n print(chain, \"chain:\")\n reqs = sorted(\"{} -> {}\".format(\", \".join(sorted(k)) or 'START',\n \", \".join(sorted(v))) for k, v in\n plugin.BY_REQUIREMENTS[chain].iteritems())\n for req in reqs:\n print(\"-\", req)", "def build_suggesters(DomainName=None):\n pass", "def do_generate(self, args):\n\t\t[lang.hierarchyLengths for lang in self.languages]", "def requires(self):\n bamcls = self.parent()[0]\n indexcls = ratatosk.lib.tools.samtools.Index\n return [bamcls(target=self.source()[0])] + [CombineVariants(target=os.path.join(self.outdir, \"CombinedVariants.vcf\"))] + [indexcls(target=rreplace(self.source()[0], bamcls().sfx(), indexcls().sfx(), 1), parent_task=fullclassname(bamcls))]", "def main(aligner):\n\n # load config file\n config = file_utils.load_json(\n os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'config',\n 'normal_config.json',\n ))\n\n # map of type of analyses required before particular analysis can run\n # note: keep this order to avoid checking requirements more than once\n required_analyses_map = {\n 'annotation': [\n 'hmmcopy',\n 'align',\n ],\n 'hmmcopy': ['align'],\n 'align': [],\n }\n\n # get colossus analysis information objects with status not complete\n analyses = colossus_api.list(\n 
\"analysis_information\",\n analysis_run__run_status_ne=\"complete\",\n aligner=aligner if aligner else config[\"default_aligner\"],\n )\n\n for analysis in analyses:\n # get library id\n library_id = analysis[\"library\"][\"pool_id\"]\n log.info(f\"{library_id}\")\n\n # skip analysis if marked as complete\n status = analysis[\"analysis_run\"][\"run_status\"]\n\n # skip analyses older than this year\n # parse off ending time range\n last_updated_date = parser.parse(analysis[\"analysis_run\"][\"last_updated\"][:-6])\n if last_updated_date < datetime(2020, 1, 1):\n continue\n\n jira_ticket = analysis[\"analysis_jira_ticket\"]\n log.info(f\"checking ticket {jira_ticket} library {library_id}\")\n for analysis_type in required_analyses_map:\n log.info(f\"checking requirements for {analysis_type}\")\n # check if analysis exists on tantalus\n try:\n tantalus_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n analysis_type__name=analysis_type,\n )\n except:\n tantalus_analysis = None\n\n if tantalus_analysis is not None:\n # check if running or complete\n status = tantalus_analysis[\"status\"]\n if status in ('running', 'complete'):\n log.info(f\"skipping {analysis_type} for {jira_ticket} since status is {status}\")\n\n # update run status on colossus\n if analysis_type == \"annotation\" and status == \"complete\":\n analysis_run_id = analysis[\"analysis_run\"][\"id\"]\n analysis_run = colossus_api.get(\"analysis_run\", id=analysis_run_id)\n colossus_api.update(\"analysis_run\", id=analysis_run_id, run_status=\"complete\")\n\n continue\n\n log.info(f\"running {analysis_type} in library {library_id} with ticket {jira_ticket}\")\n # otherwise run analysis\n saltant_utils.run_analysis(\n tantalus_analysis['id'],\n analysis_type,\n jira_ticket,\n config[\"scp_version\"],\n library_id,\n aligner if aligner else config[\"default_aligner\"],\n config,\n )\n else:\n # set boolean determining trigger of run\n is_ready_to_create = True\n # check if required completed analyses exist\n for required_analysis_type in required_analyses_map[analysis_type]:\n try:\n required_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n analysis_type__name=required_analysis_type,\n status=\"complete\",\n )\n except:\n log.error(\n f\"a completed {required_analysis_type} analysis is required to run before {analysis_type} runs for {jira_ticket}\"\n )\n # set boolean as false since analysis cannot be created yet\n is_ready_to_create = False\n break\n\n # create analysis and trigger on saltant if analysis creation has met requirements\n if is_ready_to_create:\n log.info(f\"creating {analysis_type} analysis for ticket {jira_ticket}\")\n\n try:\n tantalus_utils.create_qc_analyses_from_library(\n library_id,\n jira_ticket,\n config[\"scp_version\"],\n analysis_type,\n )\n except Exception as e:\n log.error(f\"failed to create {analysis_type} analysis for ticket {jira_ticket}\")\n continue\n tantalus_analysis = tantalus_api.get(\n 'analysis',\n jira_ticket=jira_ticket,\n analysis_type__name=analysis_type,\n )\n\n log.info(f\"running {analysis_type} in library {library_id} with ticket {jira_ticket}\")\n saltant_utils.run_analysis(\n tantalus_analysis['id'],\n analysis_type,\n jira_ticket,\n config[\"scp_version\"],\n library_id,\n aligner if aligner else config[\"default_aligner\"],\n config,\n )\n\n # get completed analyses that need montage loading\n analyses = colossus_api.list(\n \"analysis_information\",\n montage_status=\"Pending\",\n analysis_run__run_status=\"complete\",\n )\n\n for 
analysis in analyses:\n # get library id\n library_id = analysis[\"library\"][\"pool_id\"]\n\n # skip analyses older than this year\n # parse off ending time range\n last_updated_date = parser.parse(analysis[\"analysis_run\"][\"last_updated\"][:-6])\n if last_updated_date < datetime(2020, 1, 1):\n continue\n\n jira_ticket = analysis[\"analysis_jira_ticket\"]\n update_jira_dlp(jira_ticket, \"M\")\n # upload qc report to jira ticket\n attach_qc_report(jira_ticket, library_id, config[\"storages\"])\n\n # load analysis into montage\n load_ticket(jira_ticket)", "def help_analyze(self):\n print(ANALYZE)", "def workflowLessTypes(self):\n\n tools = [c.getName() for c in\n self.atgenerator.getGeneratedTools(self.package)\n if not\n utils.isTGVFalse(c.getTaggedValue('autoinstall'))]\n tools.sort()\n return tools", "def build_action_randomizers(cls, constants) -> List[ActionRandomizer]:\n return []", "def existing_analysis_sweeps(self):\n setup_list = self.existing_analysis_setups\n sweep_list = []\n s_type = self.solution_type\n for el in setup_list:\n sweep_list.append(el + \" : \" + s_type)\n return sweep_list", "def list_rulesets(command):\n namespace = app.main(command)\n assert namespace.command == 'lr' or namespace.command == \"listrulesets\"", "def generate_sla_metrics(self):\n for module in self.account_definitions:\n\n metric_spec = Definition.return_spec(\n type_set='metric_set',\n module=module\n )\n \n metric_module = importlib.util.module_from_spec(metric_spec)\n metric_spec.loader.exec_module(metric_module)\n try:\n self.metric_sets.append(metric_module.metric_set)\n except AttributeError as _ex:\n print(\"Module has no attribute metric_set\")\n sla_spec = Definition.return_spec(\n type_set='sla_set',\n module=module\n )\n \n sla_module = importlib.util.module_from_spec(sla_spec)\n sla_spec.loader.exec_module(sla_module)\n try:\n self.sla_sets.append(sla_module.sla_set)\n except AttributeError as _ex:\n print(\"Module has no attribute sla_set\")", "def list_algorithms(CreationTimeAfter=None, CreationTimeBefore=None, MaxResults=None, NameContains=None, NextToken=None, SortBy=None, SortOrder=None):\n pass", "def generateAssociationRule(freqSet):", "def analysis_setup(self):\n pass", "def _make_suggestions(self):\n\n #build concordance based on current approved\n concordance = dict()\n for term in self.tree.get_children('approved'):\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n for word in words:\n# if word == 'ad':\n# messagebox.showwarning(\"word == 'ad'\",\"concordance={}\".format(concordance))\n# pass\n if word not in ['and', 'the', 'a', 'to', 'of'] \\\n and not word.isdigit():\n if word not in concordance:\n concordance[word] = set([term, ])\n else:\n concordance[word].add(term)\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' added?\",\"concordance={}\".format(concordance))\n# pass\n \n \n #so concordance now holds a list of words in approved terms along with\\\n #list of index of terms() they occur in\n \n for term in self.tree.get_children('suggestions'):\n self._look_in_concordance(term, concordance)\n\n for term in self.tree.get_children('unknown'):\n self._look_in_concordance(term, concordance)\n\n self._collapse_all()", "def main(argv):\n argparser = ArgumentParser(\n description=\"Measure agreement of the sentiment corpus.\")\n argparser.add_argument(\"dir1\",\n help=\"directort containing annotations of the\"\n \" first expert\")\n argparser.add_argument(\"dir2\",\n help=\"directort containing 
annotations of the\"\n \" second expert\")\n args = argparser.parse_args(argv)\n\n def check_arg(arg):\n if not os.path.isdir(args.dir1) or not os.access(args.dir1, os.R_OK):\n print(\"Invalid argument {!r} should be a readable\"\n \" directory\".format(arg), file=sys.stderr)\n\n check_arg(args.dir1)\n check_arg(args.dir2)\n annotations = []\n skip_ids = set()\n for fpath1 in glob.iglob(os.path.join(args.dir1, \"*.tsv\")):\n if not os.access(fpath1, os.R_OK):\n print(\"WARNING: cannot read file {:s}\".format(\n fpath1\n ), file=sys.stderr)\n continue\n base_fpath1 = os.path.basename(fpath1)\n fpath2 = os.path.join(args.dir2, base_fpath1)\n if not os.path.exists(fpath2):\n print(\"WARNING: file {:s} not found\".format(\n fpath1\n ), file=sys.stderr)\n continue\n elif not os.access(fpath2, os.R_OK):\n print(\"WARNING: cannot read file {:s}\".format(\n fpath1\n ), file=sys.stderr)\n continue\n read_annotations(annotations, skip_ids, fpath1, 1)\n read_annotations(annotations, skip_ids, fpath2, 2)\n annotations = [(coder, item, label)\n for (coder, item, label) in annotations\n if item not in skip_ids]\n at = AnnotationTask(annotations, distance=interval_distance)\n # print(at.avg_Ao())\n # print(at.Ae_kappa())\n print(\"Cohen's Kappa: {:.4f}\\nKrippendorff's Alpha: {:4f}\".format(\n at.kappa(), at.alpha()\n ))", "def build_frequency_list(name_list):\n analyzer = build_analyzer()\n char_list = []\n for name in name_list:\n char_list += analyzer(name)\n return char_list", "def testAnalyzerRegistration(self):\n number_of_analyzers = len(manager.AnalyzersManager._analyzer_classes)\n manager.AnalyzersManager.RegisterAnalyzer(TestAnalyzer)\n self.assertEqual(\n number_of_analyzers + 1, len(\n manager.AnalyzersManager._analyzer_classes))\n\n with self.assertRaises(KeyError):\n manager.AnalyzersManager.RegisterAnalyzer(TestAnalyzer)\n\n manager.AnalyzersManager.DeregisterAnalyzer(TestAnalyzer)\n\n self.assertEqual(\n number_of_analyzers, len(manager.AnalyzersManager._analyzer_classes))", "def newAnalyzer():\n analyzer = {'events': None,\n \"musical_genre\":None,\n 'artist_ID': None,\n \"track_ID\": None,\n \"instrumentalness\": None,\n \"acousticness\": None,\n \"liveness\": None,\n \"speechiness\": None,\n \"energy\":None,\n \"danceability\": None,\n \"valence\": None\n }\n # Listas\n analyzer['events'] = lt.newList('ARRAY_LIST', compareIds)\n\n # RBT \n analyzer[\"instrumentalness\"] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['liveness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['energy'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['danceability'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['valence'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['tempo'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['created_at'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n analyzer['dates_u'] = om.newMap(omaptype='RBT',comparefunction=compareDates)\n \n\n # Tablas de Hash \n analyzer['artist_ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['track_ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['ID'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n 
analyzer[\"musical_genre\"] = mp.newMap(15,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer[\"sen\"] = mp.newMap(15,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n analyzer['track_ID_S'] = mp.newMap(10000,maptype='PROBING',loadfactor=0.5, comparefunction=compareByName)\n\n return analyzer", "def create_rules(app_names, error):\n # If no applications is given on the command line, generate the rules\n # for all the registered applications\n if not app_names:\n app_names = [entry.name for entry in pkg_resources.iter_entry_points('nagare.applications')]\n\n package = pkg_resources.Requirement.parse('nagare')\n static = pkg_resources.resource_filename(package, 'static')\n\n apps = [('nagare', static)] # Initialize the result tuple with the static contents of the framework\n\n for app_name in app_names:\n (cfgfile, app, dist, aconf) = util.read_application(app_name, error)\n\n static = aconf['application'].get('static', os.path.join(dist.location, 'static') if dist else None)\n\n if static and os.path.isdir(static):\n apps.append((aconf['application']['name'], static))\n\n return sorted(apps, key=lambda x: len(x[0]))", "def get_agency_terms(self):\n return # osid.authentication.AgencyQueryInspector", "def compile_tuning(self, cf):\n logger = self._logger\n for s in cf.sections():\n logger.debug('Collecting definitions for %s', s)\n try:\n rexp = cf.get(s, 'regex')\n except ConfigParser.NoOptionError:\n self._logger.error('Section \"%s\" missing \"regex\" option', s)\n continue\n try:\n r = re.compile(rexp)\n except:\n self._logger.error('Section \"%s\" invalid \"regex\" option', s)\n continue\n try:\n transfer = cf.getint(s, 'transfer')\n except ConfigParser.NoOptionError:\n self._logger.info('Section \"%s\" missing \"transfer\" option', s)\n transfer = 512\n except ValueError:\n self._logger.error('Section \"%s\", \"transfer\" option must be integer', s)\n continue\n try:\n readahead = cf.getint(s, 'readahead')\n except ConfigParser.NoOptionError:\n self._logger.info('Section \"%s\" missing \"readahead\" option', s)\n readahead = 2*transfer\n except ValueError:\n self._logger.error('Section \"%s\", \"readahead\" option must be integer', s)\n continue\n try:\n scheduler = cf.get(s, 'scheduler')\n except ConfigParser.NoOptionError:\n self._logger.info('Section \"%s\" missing \"scheduler\" option', s)\n scheduler = None\n opts = []\n if scheduler == 'deadline':\n opts = []\n for deadOpt in ('fifo_batch', 'read_expire', 'write_expire', 'writes_starved', 'front_merges'):\n try:\n deadVal = cf.getint(s, deadOpt)\n except ConfigParser.NoOptionError:\n self._logger.info('Section \"%s\" missing \"%s\" option', s, deadOpt)\n deadVal = None\n except ValueError:\n self.logger.error('Section \"%s\", \"%s\" option must be integer', s, deadOpt)\n continue\n opts += [ deadVal ]\n self._lunMatch.append( ( r, transfer, readahead, scheduler, opts ) )", "def test_creation(self):\n config = config_factory()\n db = config.arango_db\n\n # Create analyzer\n analyzer = ArangoAnalyzer(\"analyzer_sample\")\n analyzer.set_stopwords(\n language=\"english\",\n custom_stopwords=[\"stop\", \"word\"],\n include_default=False,\n )\n analyzer.type = ArangoAnalyzer._TYPE_TEXT\n\n analyzer.create(db)\n assert analyzer.__dict__ == {\n \"name\": \"analyzer_sample\",\n \"type\": \"text\",\n \"features\": [\"frequency\", \"norm\", \"position\"],\n \"locale\": \"en\",\n \"case\": \"lower\",\n \"stopwords\": [\"stop\", \"word\"],\n \"accent\": False,\n \"stemming\": True,\n \"edge_ngram\": 
None,\n \"delimiter\": \",\",\n \"min\": 2,\n \"max\": 5,\n \"preserve_original\": False,\n \"start_marker\": \"\",\n \"end_marker\": \"\",\n \"stem_type\": \"binary\",\n }\n assert isinstance(db.analyzer(\"analyzer_sample\"), dict)", "def get_all_parsers():\n return [OptimizerFactory.get_parser(optimizer) for optimizer in OptimizerFactory.optimizers]", "def list_modules(lookup_paths: list = None):\n result = []\n\n if lookup_paths is None:\n lookup_paths = analyzer_paths()\n\n for path in lookup_paths:\n analyzer_module_root = resource_filename(path, \"modules\")\n # analyzer_root = os.path.join(anchore_module_root, \"modules\")\n for f in os.listdir(analyzer_module_root):\n thecmd = os.path.join(analyzer_module_root, f)\n if re.match(r\".*\\.py$\", thecmd):\n result.append(thecmd)\n\n result.sort(key=lambda x: analyzer_name_from_path(x))\n return result", "def add_arguments_to_parser(parser):\n\n parser.add_argument('--analyzers',\n nargs='+',\n dest='analyzers',\n metavar='ANALYZER',\n required=False,\n choices=analyzer_types.supported_analyzers,\n default=list(analyzer_types.supported_analyzers.\n keys()),\n help=\"Show checkers only from the analyzers \"\n \"specified.\")\n\n if get_diagtool_bin():\n parser.add_argument('-w', '--warnings',\n dest='show_warnings',\n default=argparse.SUPPRESS,\n action='store_true',\n required=False,\n help=\"Show available warning flags.\")\n\n parser.add_argument('--details',\n dest='details',\n default=argparse.SUPPRESS,\n action='store_true',\n required=False,\n help=\"Show details about the checker, such as \"\n \"description, if available.\")\n\n parser.add_argument('--profile',\n dest='profile',\n metavar='PROFILE/list',\n required=False,\n default=argparse.SUPPRESS,\n help=\"List checkers enabled by the selected profile. \"\n \"'list' is a special option showing details \"\n \"about profiles collectively.\")\n\n parser.add_argument('--guideline',\n dest='guideline',\n nargs='*',\n required=False,\n default=None,\n help=\"List checkers that report on a specific \"\n \"guideline rule. Here you can add the guideline \"\n \"name or the ID of a rule. Without additional \"\n \"parameter, the available guidelines and their \"\n \"corresponding rules will be listed.\")\n\n parser.add_argument('--checker-config',\n dest='checker_config',\n default=argparse.SUPPRESS,\n action='store_true',\n required=False,\n help=\"Show checker configuration options for all \"\n \"existing checkers supported by the analyzer. 
\"\n \"These can be given to 'CodeChecker analyze \"\n \"--checker-config'.\")\n\n filters = parser.add_mutually_exclusive_group(required=False)\n\n filters.add_argument('--only-enabled',\n dest='only_enabled',\n default=argparse.SUPPRESS,\n action='store_true',\n help=\"Show only the enabled checkers.\")\n\n filters.add_argument('--only-disabled',\n dest='only_disabled',\n default=argparse.SUPPRESS,\n action='store_true',\n help=\"Show only the disabled checkers.\")\n\n parser.add_argument('-o', '--output',\n dest='output_format',\n required=False,\n default='rows',\n choices=USER_FORMATS,\n help=\"The format to list the applicable checkers as.\")\n\n logger.add_verbose_arguments(parser)\n parser.set_defaults(func=main)", "def collect_rules():\n all_rules = []\n keys = ['mobileconfig',\n 'macOS',\n 'severity',\n 'title',\n 'check',\n 'fix',\n 'odv',\n 'tags',\n 'id',\n 'references',\n 'result',\n 'discussion']\n references = ['disa_stig',\n 'cci',\n 'cce',\n '800-53r4',\n 'srg']\n\n\n for rule in glob.glob('../rules/**/*.yaml',recursive=True) + glob.glob('../custom/rules/**/*.yaml',recursive=True):\n if \"supplemental\" in rule:\n continue\n rule_yaml = get_rule_yaml(rule, custom=False)\n for key in keys:\n try:\n rule_yaml[key]\n except:\n rule_yaml.update({key: \"missing\"})\n if key == \"references\":\n for reference in references:\n try:\n rule_yaml[key][reference]\n except:\n rule_yaml[key].update({reference: [\"None\"]})\n\n if \"n_a\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"n_a\")\n if \"inherent\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"inherent\")\n if \"manual\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"manual\")\n if \"none\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"none\")\n if \"permanent\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"permanent\")\n if \"supplemental\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"supplemental\")\n if \"i386\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"i386\")\n if \"arm64\" in rule_yaml['tags']:\n rule_yaml['tags'].remove(\"arm64\")\n\n all_rules.append(MacSecurityRule(rule_yaml['title'].replace('|', '\\|'),\n rule_yaml['id'].replace('|', '\\|'),\n rule_yaml['severity'].replace('|', '\\|'),\n rule_yaml['discussion'].replace('|', '\\|'),\n rule_yaml['check'].replace('|', '\\|'),\n rule_yaml['fix'].replace('|', '\\|'),\n rule_yaml['references']['cci'],\n rule_yaml['references']['cce'],\n rule_yaml['references']['800-53r4'],\n rule_yaml['references']['disa_stig'],\n rule_yaml['references']['srg'],\n rule_yaml['odv'],\n rule_yaml['tags'],\n rule_yaml['result'],\n rule_yaml['mobileconfig'],\n rule_yaml['mobileconfig_info']\n ))\n return all_rules", "def scan(self):\n exclude_patterns_config = self.doxygen_config.get(\"EXCLUDE_PATTERNS\", [])\n exclude_patterns = [\n pattern.replace(\"*/\", \"**/\") for pattern in exclude_patterns_config\n ]\n file_patterns = self.doxygen_config.get(\"FILE_PATTERNS\", [\"*.c\", \"*.h\"])\n if self.doxygen_config.get(\"RECURSIVE\", [\"YES\"]) == [\"YES\"]:\n file_patterns = [f\"**/{pattern}\" for pattern in file_patterns]\n nodes = []\n names = []\n for node in self.doxygen_input:\n if os.path.isdir(node.abspath()):\n for i in node.ant_glob(incl=file_patterns, excl=exclude_patterns):\n nodes.append(i)\n else:\n nodes.append(node)\n return (nodes, names)", "def check(self, manager):\n for all_json in self.api_dir.rglob(ALL_JSON):\n stem = all_json.relative_to(self.api_dir)\n yield dict(\n name=f\"validate:translation:{stem}\",\n doc=f\"Validate {stem} with 
the JupyterLab Translation API\",\n file_dep=[all_json],\n actions=[(self.validate_one_json_file, [None, all_json])],\n )", "def extract_rule_names(self):\n if self.scanner == YARA:\n return sorted({result['rule'] for result in self.results})\n if self.scanner == CUSTOMS and 'matchedRules' in self.results:\n return self.results['matchedRules']\n # We do not have support for the remaining scanners (yet).\n return []", "def _generate_recommendation(self,\n query_analysis,\n db_name,\n collection_name):\n index_rec = '{'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is EQUIV_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is SORT_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n for query_field in query_analysis['analyzedFields']:\n if query_field['fieldType'] is RANGE_TYPE:\n if len(index_rec) is not 1:\n index_rec += ', '\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\n index_rec += '}'\n\n # RECOMMENDATION\n return OrderedDict([('index',index_rec),\n ('shellCommand', self.generate_shell_command(collection_name, index_rec))])", "def version(self) -> str:\n return \" \".join(self._model_id(a) for a in self._analyzers)", "def gen_docs(jsons: [{}], char_wb: bool = False, add_taint: bool = False) -> [Learner.LabelledDocs]:\n docs = []\n taint_counts = 0\n for flow in jsons:\n line = Analyzer.filter_url_words(flow['url'])\n if '_' in flow['taint']:\n taint_counts += 1\n if add_taint:\n line = line + ' ' + 't_' + flow['taint']\n label = 1 if flow['label'] == '1' else 0\n real_label = 1 if flow['real_label'] == '1' else 0\n if real_label != label:\n logger.info(\"Flow's real label does not match the training label for %s, real_label = %d label = %d\",\n flow['url'], real_label, label)\n numeric = [flow[name] for name in Analyzer.numeric_features]\n docs.append(Learner.LabelledDocs(line, label, numeric, real_label, char_wb=char_wb))\n logger.info('The number of flows who have more than 1 taints: %d', taint_counts)\n return docs", "def GenerateACLString(self):\n target_string = ''\n app_id = 100 # variable in ACL sentences.\n\n for terms in self.silverpeak_terms:\n for term in terms:\n for unit in term.GenerateUnitList():\n if term.term.precedence:\n for precedence in term.term.precedence:\n target_string += self._GenerateACLLine(app_id, term,\n unit, precedence)\n app_id += 100\n else:\n target_string += self._GenerateACLLine(app_id, term, unit)\n app_id += 100\n # finalize the target string\n target_string = self._FinalizeString(target_string, self.pre_string,\n self.fixed_content)\n return target_string", "def init():\n # analyzer es utilizado para interactuar con el modelo\n analyzer = model.newAnalyzer()\n return analyzer", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def _generate_recommendation(self,\r\n query_analysis,\r\n db_name,\r\n collection_name):\r\n index_rec = '{'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is EQUIV_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is SORT_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + 
'\": 1'\r\n for query_field in query_analysis['analyzedFields']:\r\n if query_field['fieldType'] is RANGE_TYPE:\r\n if len(index_rec) is not 1:\r\n index_rec += ', '\r\n index_rec += '\"' + query_field['fieldName'] + '\": 1'\r\n index_rec += '}'\r\n\r\n # RECOMMENDATION\r\n return OrderedDict([('index',index_rec),\r\n ('shellCommand', self.generate_shell_command(collection_name, index_rec))])", "def get_rules():\n rules = []\n\n for app_module in get_config('tipfy', 'apps_installed'):\n try:\n # Load the urls module from the app and extend our rules.\n app_rules = import_string('%s.urls' % app_module)\n rules.extend(app_rules.get_rules())\n except ImportError:\n pass\n\n return rules", "def build_corpus(self):\n # #############################\n\n doc = metapy.index.Document()\n tok = metapy.analyzers.ICUTokenizer(suppress_tags=True)\n tok = metapy.analyzers.LowercaseFilter(tok)\n tok = metapy.analyzers.LengthFilter(tok, min=3, max=1000)\n tok = metapy.analyzers.Porter2Filter(tok)\n tok = metapy.analyzers.ListFilter(tok, \"lemur-stopwords.txt\", metapy.analyzers.ListFilter.Type.Reject)\n collection = -1\n\n with open(self.documents_path) as file:\n for num, line in enumerate(file):\n l = line.strip()\n c = int(l[0])\n l = l[2:]\n doc.content(l)\n tok.set_content(doc.content())\n if c != collection:\n self.documents.append([])\n collection = c\n self.documents[c].append([token for token in tok])\n self.number_of_collections = len(self.documents)\n self.number_of_documents = len(self.documents[0])\n #print(self.number_of_collections)\n #print(self.number_of_documents)\n #print(self.documents[0])", "def hrules(self):\n ...", "def list_all_agencies():\n return JsonResponse.create(StatusCode.OK, get_all_agencies())", "def get_all_rules(self):\n\n rules = set()\n for a_dict in self.get_dicts():\n rules = keywords.union(a_dict['rules'])\n return sorted(keywords)", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def __all_Algs_ ( self ) :\n _algs = self.algorithms()\n\n algs = []\n for _a in _algs :\n algs += [ self.algorithm ( _a ) ]\n return algs", "def bookAlgorithms(config, visitor):\n import imp\n import os\n\n CLI = config.getFolder(\"CLI+\")\n # flag indicating to run a robust analysis\n robust = CLI.getTagBoolDefault(\"robust\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n # book any algorithms\n for algorithmscript_TString in config.getTagVString(\"algorithms.snippets\"):\n QFramework.TQStringUtils.removeLeadingBlanks(algorithmscript_TString)\n QFramework.TQStringUtils.removeTrailingBlanks(algorithmscript_TString)\n QFramework.TQStringUtils.removeTrailingText(algorithmscript_TString, \".py\")\n algorithmscript = algorithmscript_TString.Data()\n found_modules = []\n algorithmsDirs = config.getTagVStandardString(\"algorithms.directories\")\n # search through the directories provided in the config\n for algorithmsPath in algorithmsDirs:\n module = QFramework.TQFolder.concatPaths(algorithmsPath, algorithmscript) + \".py\"\n module = common.findConfigPath(module, False)\n # findConfigPath returns \"\" if no module was found\n if len(module) > 0:\n # snippet was found in this directory -\n # add its absolute path and the directory it was found in\n # to a list in tuple form\n found_modules.append((module, algorithmsPath))\n if len(found_modules) == 0:\n # check CommonAnalysisHelpers for an algorithm snippet as fall-back\n CAHAlgorithmsDir = 
\"CommonAnalysisHelpers/share/algorithms\"\n algorithmsDirs.push_back(CAHAlgorithmsDir)\n module = QFramework.TQFolder.concatPaths(CAHAlgorithmsDir, algorithmscript) + \".py\"\n module = QFramework.TQPathManager.findFileFromEnvVarWithoutExecDir(module, \"CAFCOREDIR\")\n if len(module) > 0:\n found_modules.append((module, CAHAlgorithmsDir))\n print(len(found_modules))\n # continue only if there was one match found\n if len(found_modules) == 0:\n QFramework.BREAK(\"No module found for '{:s}' in the custom algorithm directories provided:\\n{:s}\\n\".format(algorithmscript,', '.join(algorithmsDirs))+\n \"Please make sure that there exists a snippet by the name of '{:s}.py' available in one of them.\\n\".format(algorithmscript))\n elif len(found_modules) > 1:\n QFramework.BREAK(\"Ambiguity detected while resolving custom algorithm snippet location. Multiple modules found for {:s} in the custom algorithm directories provided:\\n{:s}\\n\".format(algorithmscript,', '.join(algorithmsDirs))+\n \"Consider placing the {:s}.py snippet only in a common directory if it's used by more than one (sub)analysis.\".format(algorithmscript))\n abs_path = found_modules[0][0]\n module_name = os.path.basename(abs_path).rstrip(\".py\")\n relative_path = QFramework.TQFolder.concatPaths(found_modules[0][1], algorithmscript)+\".py\"\n QFramework.START(\"l.\",\"loading algorithms from '{:s}'\".format(str(relative_path)))\n try:\n addalgorithms = imp.load_source(module_name, abs_path)\n added = addalgorithms.addAlgorithms(visitor,config)\n if added:\n QFramework.END(QFramework.TQMessageStream.OK)\n else:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to properly setup custom algorithms\")\n except IOError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n QFramework.BREAK(\"unable to open file '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except NameError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"syntax error in algorithm snippet '{:s}' - please double-check!\\n\".format(abs_path)+\"Message from python:\\n\"+str(error))\n except AttributeError as error:\n QFramework.END(QFramework.TQMessageStream.FAIL)\n if not robust and not dummy:\n QFramework.BREAK(\"attribute error in algorithm snippet '{:s}' - please double-check!\\n\".format(abs_path)+\n \"If the message from python below is\\n'module' object has no attribute 'addAlgorithms'\\nplease make sure that the snippet has the function addAlgorithms() defined.\\n\"\n \"Message from python:\\n\"+str(error))\n\n # only try and do the xAOD skimming configuration below if we are running with the MCASV\n # since all channels should be considered at the same time (e.g. 
systematic variations)\n if isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n # TODO: these two lines are also done in bookAnalysisJobs\n xAODdumpingConfig = QFramework.TQTaggable()\n dumpXAODs = (xAODdumpingConfig.importTagsWithoutPrefix(config,\"xAODdumping.\") > 0)\n\n jobID = CLI.getTagStringDefault(\"jobID\",\"analyze\")\n\n #add xAODskimmingAlgorithm if requested (only for MCASV as we'd have event duplications otherwise!)\n #note: if we ever implement an option to limit the number of channels executed at the same time we must ensure this does not run in such a configuration!!!!\n if dumpXAODs:\n print(\"Setting up xAOD skimming Algorithm...\")\n xAODskimmingAlg = ROOT.TQxAODskimmingAlgorithm()\n xAODskimmingAlg.SetName(\"xAODdumper\")\n if xAODdumpingConfig.hasTag(\"flagName\"): xAODskimmingAlg.setFlagName(xAODdumpingConfig.getTagStringDefault(\"flagName\",\"\"))\n xAODskimmingAlg.setOutputDir( xAODdumpingConfig.getTagStringDefault(\"outputDir\",\"CAFxAODs\") )\n xAODskimmingAlg.setFilePrefix(jobID+\"_\")\n if config.hasTag(\"nameTagName\") : xAODskimmingAlg.setPrefix( config.getTagStringDefault( ROOT.TString(\"aliases.\")+config.getTagStringDefault(\"nameTagName\",\"\"), \"\" ) )\n visitor.addAlgorithm( xAODskimmingAlg )", "def genlangs(self):\r\n raise NotImplementedError", "def requires(self):\n return []", "def _compile_codecoolers(cls):\n cls.codecoolers = Student.get_students() + Mentor.get_mentors() + Admin.get_admins()", "def antonyms_pipeline(config: SettingConfig) -> None:\n raw_antonym_pairs = generate_antonym_pairs(config)\n for pos in config.pos.keys():\n processed_synonym_pairs = postprocess_pairs(raw_antonym_pairs[pos], config)\n write_pairs(processed_synonym_pairs, config.constraints_root_path, pos, \"antonyms\")", "def required(cls):\n return []", "def get_authorizers(self, ApiId: str, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def make_control_knowledge(self, horizon):\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # ADD_RULE1_COUNT = 0\n # ADD_RULE2_COUNT = 0\n # ADD_RULE3_COUNT = 0\n\n close = list()\n far = list()\n\n for g in self.problem.goal:\n for p in self.problem.propositions:\n if re.match(r'at\\spackage\\d+\\scity\\d+-\\d+', str(p)):\n p_split = str(p).split()\n g_split = str(g).split()\n\n # if \"at\" and \"package[oo]\" match\n if p_split[0] == g_split[0] and p_split[1] == g_split[1]:\n # also \"city[oo]-[xx]\" match\n if p_split[2][:-2] == g_split[2][:-2]:\n close.append(p)\n else:\n far.append(p)\n\n # Rule 1:\n # ===============================\n # If a package is at its goal location, then it must remain there.\n # p@t and goal@t) -> p@t+1), where p is at(package, location)\n # cnf: not p@t or not goal@t or p@t+1\n\n for g in self.problem.goal:\n for t in range(0, horizon):\n clause = list()\n clause.append(-self.proposition_fluent_codes[(g, t)])\n clause.append(self.proposition_fluent_codes[(g, t + 1)])\n self.add_clause(clause, \"control\")\n # ADD_RULE1_COUNT += 1\n\n for t in range(0, horizon):\n for a in self.problem.actions:\n\n # Rule 2\n # ===============================\n\n # RULE\n # close -> do not load airplane\n # p1: close@t\n # p2: at the location of an airport @t\n # p3: airplane at this location @t\n # p4: plane is not loaded\n # a: load this airplane\n #\n # p1@t and p2@t and p3@t and p4@t => a@t\n # not p1@t or not p2@t or not p3@t or not p4@t or a@t\n # cnf: not p@t or not a@t\n if str(a).startswith('load-airplane'):\n for i in close:\n package = str(i).split()[1]\n if str(a).split()[1] == 
package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(i, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE2_COUNT += 1\n\n # Rule 3\n # ===============================\n\n # RULE\n # far -> do not unload airplane\n # p@t -> not a@t, where p is far, a is unload-airplane\n # cnf: not p@t or not a@t\n if str(a).startswith('unload-airplane'):\n for j in far:\n package = str(j).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(j, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE3_COUNT += 1\n\n # # RULE\n # # if an airplane has a package on it and the package's\n # # destination is close do not fly this airplane.\n # # in fact, if the destination of package is far,\n # # fly this plane to it.\n # #\n # # p1: package on airplane @ t\n # # p2: package at a place @ t\n # # p3: the place and the goal are in the same city\n # # rule: p1@t and p2@t and p@3 => not fly plane@t\n # # and unload the plane@t\n #\n # # not p1@t or not p2@t or not fly@t\n # # not p1@t or not p2@t or unload\n #\n # # rule: p1@t and p2@t and not p3@t => fly plane@t and not\n # # unload the plane@t\n #\n # if str(a).startswith('fly-airplane'):\n # plane = str(a).split()[1]\n # # loc_from = str(a).split()[2]\n # for p1 in self.problem.propositions:\n # if str(p1).startswith('in package') and str(p1).split()[2] == plane: # in package plane\n # package = str(p1).split()[1]\n # for p2 in self.problem.propositions:\n # if p2 in close and str(p2).split()[1] == package: # at package location\n # clause = list()\n # clause.append(-self.proposition_fluent_codes[p1, t])\n # clause.append(-self.proposition_fluent_codes[p2, t])\n # clause.append(-self.action_fluent_codes[a, t])\n # self.add_clause(clause, 'control')\n # ADD_RULE2_COUNT += 1\n #\n #\n # for g in self.problem.goal:\n # if str(g).split()[1] == package:\n # destination = str(g).split()[2]\n # for do in self.problem.actions:\n # # unload-airplane package00 plane00 city00-00\n # if str(do).startswith('unload') and str(do).split()[1] == package and str(do).split()[2] == plane and str(do).split()[3] == destination:\n # clause2 = list()\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p1, t])\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p2, t])\n # clause2.append(\n # self.action_fluent_codes[\n # do, t])\n # self.add_clause(clause2,\n # 'control')\n #\n # ADD_RULE3_COUNT += 1\n\n # RULE\n # if there is no package needs to be transferred at a location,\n # and the location has a truck\n # drive the truck to its airport\n\n # p1: (at package__ city__-__ /\\ (it is a goal)@t\n # p2: (at truck__ city__-__)@t\n # p3: (city__-__ is not airport)\n # not p1/\\p2/\\p3 => drive_truck_to_its_airport@t\n #\n #\n # CNF: p1 V not p2 V not p3 V drive_truck_to_its_airport@t\n # if str(a).startswith('DRIVE-TRUCK'):\n # for p1 in self.problem.goal:\n # city = str(p1).split()[2]\n # for p2 in self.problem.propositions:\n # if str(p2).startswith('at truck') and str(p2).split()[2] == city:\n # for p3 in self.problem.propositions:\n # if str(p3).startswith('airport') and str(p3).split()[1] == city:\n # clause = list()\n # clause.append(self.proposition_fluent_codes[(p1, t)])\n # clause.append(-self.proposition_fluent_codes[(p2, t)])\n # clause.append(-self.proposition_fluent_codes[(p3, t)])\n # clause.append(self.action_fluent_codes[(a, t)])\n # self.add_clause(clause, 
\"control\")\n\n # RULE\n # if there is an airplane is loaded with a package need\n # transfer (to another city), fly airplane to the corresponding\n # city.\n\n # p1: (at airplane__ city__-__)@t\n # p2: (in package__ airplane__)@t\n # p3: ( p2 is in far)\n # p1/\\p2/\\p3 => fly_airplane_to_its_airport@t\n #\n #\n # CNF: not p1@t V not p2@t V not p3@t V fly_plane_to_airport@t\n\n # print(\"ADDED RULE 1:\")\n # print(ADD_RULE1_COUNT)\n #\n # print(\"ADDED RULE 2:\")\n # print(ADD_RULE2_COUNT)\n #\n # print(\"ADDED RULE 3:\")\n # print(ADD_RULE3_COUNT)", "def generate(self):\n\n\t\tfor datapoint in self.dataSet[:]:\n\t\t\trule, degree = self.makeRule(datapoint)\n\t\t\tself.generatedRules.append((rule, degree))", "def get_argparser_ctor_args():\n\n data_files_dir_path = analyzer_context.get_context().data_files_dir_path\n config_dir_path = os.path.join(data_files_dir_path, 'config')\n return {\n 'prog': 'CodeChecker checkers',\n 'formatter_class': arg.RawDescriptionDefaultHelpFormatter,\n\n # Description is shown when the command's help is queried directly\n 'description': \"Get the list of checkers available and their enabled \"\n \"status in the supported analyzers.\",\n\n # Epilogue is shown after the arguments when the help is queried\n # directly.\n 'epilog': \"\"\"\nThe list of checkers that are enabled or disabled by default can be edited by\nediting the file '{}'.\n\nEnvironment variables\n------------------------------------------------\n CC_SEVERITY_MAP_FILE Path of the checker-severity mapping config file.\n Default: '{}'\n CC_GUIDELINE_MAP_FILE Path of the checker-guideline mapping config file.\n Default: '{}'\n CC_PROFILE_MAP_FILE Path of the checker-profile mapping config file.\n Default: '{}'\n\"\"\".format(os.path.join(config_dir_path, 'checker_profile_map.json'),\n os.path.join(config_dir_path, 'checker_severity_map.json'),\n os.path.join(config_dir_path, 'checker_guideline_map.json'),\n os.path.join(config_dir_path, 'checker_profile_map.json')),\n\n # Help is shown when the \"parent\" CodeChecker command lists the\n # individual subcommands.\n 'help': \"List the checkers available for code analysis.\"\n }", "def _get_installed_targets(target_types):\r\n lines = [TargetsHelp.INSTALLED_TARGETS_HEADER]\r\n for target_type in sorted(target_types.keys()):\r\n if target_types[target_type].__doc__ is None:\r\n desc = 'Description unavailable.'\r\n else:\r\n desc = target_types[target_type].__doc__.split('\\n')[0]\r\n lines.append(' %s: %s' % (\r\n TargetsHelp.TARGET_TO_ALIAS[target_type].rjust(TargetsHelp.MAX_ALIAS_LEN), desc))\r\n return lines", "def setup(self):\n declared = []\n for obj in Rt.objective:\n var_list = split(\"[+*/-]\", obj)\n for v in var_list:\n if v not in declared:\n self.add_input(v)\n declared.append(v)\n self.add_output(\"Objective function \" + obj)", "def get_rules(cls):\n raise NotImplementedError()", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from 
files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def test_get_analyzer(self):\n analyzer_class = manager.AnalysisManager.get_analyzer('mockanalyzer')\n self.assertEqual(analyzer_class, MockAnalyzer)", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def list_resolver_rule_associations(MaxResults=None, NextToken=None, Filters=None):\n pass", "def main():\n\t#ps = PackageScanner()\n\t#packages = ps.getInstalledPackages()\n\t#print(packages)\n\t#ps.saveScanResults()\n\n\tan = Analyzer()\n\tan.loadFromFile(config.PKG_SCAN_DIR / config.PKG_SCAN_FILE)\n\t#an.loadFromPackageCont(packages)\n\tan.analyze()\n\tan.saveAnalysisResults()", "def test_analyzer():\n import analyzer\n\n analyzer # Fake usage.", "def doc_analyzer(self, doc):\n\n if self.lowercase is None or self.lowercase == 'none':\n lowercase = set()\n elif self.lowercase in {'both', 'all'}:\n lowercase = {'char', 'word'}\n else: lowercase = {self.lowercase}\n\n # character n-grams\n if 'char' in lowercase:\n docfeat = 
self.get_ngrams(list(doc.lower()),\n self.c_ngmin, self.c_ngmax)\n else:\n docfeat = self.get_ngrams(list(doc),\n self.c_ngmin, self.c_ngmax)\n # word n-grams\n if 'word' in lowercase:\n docfeat.extend(self.get_ngrams(self.tokenizer(doc.lower()),\n self.w_ngmin, self.w_ngmax,\n suffix=\"⅏\", separator=\" \"))\n else:\n docfeat.extend(self.get_ngrams(self.tokenizer(doc),\n self.w_ngmin, self.w_ngmax,\n suffix=\"⅏\", separator=\" \"))\n return docfeat", "def get_checker_configs(self):\n LOG.debug(\"Tidy extra args: %s\", self.analyzer_extra_arguments)\n\n res = []\n\n # Get config from the extra arguments if there is any.\n try:\n tidy_config_parser = argparse.ArgumentParser()\n tidy_config_parser.add_argument('-config',\n dest='tidy_config',\n default='',\n type=str)\n\n args, _ = tidy_config_parser.parse_known_args(\n shlex.split(self.analyzer_extra_arguments))\n\n except Exception as ex:\n LOG.debug('No config found in the tidy extra args.')\n LOG.debug(ex)\n return res\n\n try:\n # Match for clang tidy analyzer names and attributes.\n clang_tidy_checker_rex = r'^(?P<checker_name>([^.]+))' \\\n r'\\.(?P<checker_attribute>([^.]+))$'\n\n tidy_pattern = re.compile(clang_tidy_checker_rex)\n tidy_config = json.loads(args.tidy_config)\n for checker_option in tidy_config.get('CheckOptions', []):\n value = checker_option['value']\n # We only store configs related to tidy checks. We run static\n # analyzer separately, so it does not affect the SA invocation.\n key_values_tidy = re.match(tidy_pattern, checker_option['key'])\n if key_values_tidy:\n checker_name = key_values_tidy.group('checker_name')\n checker_attr = key_values_tidy.group('checker_attribute')\n res.append((checker_name, checker_attr, value))\n else:\n LOG.debug('no match')\n except ValueError as verr:\n LOG.debug('Failed to parse config.')\n LOG.debug(verr)\n except Exception as ex:\n LOG.debug('Failed to process config.')\n LOG.debug(ex)\n\n return res", "def generate_schema_list():\n src = os.path.join(os.path.dirname(__file__), '../schemas')\n for root, dirs, files in os.walk(src):\n for fname in files:\n if not fname.endswith('.yaml'):\n continue\n if os.path.splitext(fname)[0] in (\n 'draft-01', 'asdf-schema-1.0.0'):\n continue\n yield os.path.join(root, fname)", "def getAnalyzerIndex(self, name):\n\n self.ensureNotCreated()\n\n if not name in self.analyzers:\n raise Exception('Analyzer %r is not present in the framework configuration' % name)\n\n return self.analyzers.index(name)", "def affixes(etymologies):\n for affix in AFFIXES:\n name = FIRST(affix.groupindex.keys())\n print('building {} dictionary...'.format(name))\n dictionary = defaultdict(list)\n with ProgressBar(maxval=len(etymologies)) as progress:\n for i, key in enumerate(etymologies):\n for definition in etymologies[key]:\n if definition.has_key(name):\n dictionary[key].append(definition)\n progress.update(i)\n yield name, dictionary", "def __init__(self):\n \n self.label = \"ArcSDM Tools\"\n self.alias = \"ArcSDM\" \n\n # List of tool classes associated with this toolbox\n self.tools = [PartitionNNInputFiles, CombineNNOutputFiles, NeuralNetworkOutputFiles, NeuralNetworkInputFiles, \n CalculateWeightsTool,SiteReductionTool,CategoricalMembershipToool,\n CategoricalAndReclassTool, TOCFuzzificationTool, CalculateResponse, LogisticRegressionTool, Symbolize, \n ROCTool, AgterbergChengCITest, AreaFrequencyTable, GetSDMValues, GrandWofe]", "def root_lex_analysis():\n\n return {\"cat\": \"VerbLex\",\n \"vform\": \"bare\",\n \"orthoForm\": [\"*RootForm\"]}", "def 
generateRules(L, support_data, min_confidence=0.5):\r\n rules = []\r\n for i in range(1, len(L)):\r\n for freqSet in L[i]:\r\n H1 = [frozenset([item]) for item in freqSet]\r\n if (i > 1):\r\n rules_from_conseq(freqSet, H1, support_data, rules, min_confidence)\r\n else:\r\n calc_confidence(freqSet, H1, support_data, rules, min_confidence)\r\n return rules", "def main():\n if len(sys.argv) == 1:\n print(\"No dependencies file to validate!\")\n return\n dependencies_file = sys.argv[1]\n try:\n dependencies = json.loads(open(dependencies_file, 'r').read())\n except json.decoder.JSONDecodeError:\n print(\"Invalid dependency file syntax! Make sure you don't have any commas at the end of your last dependency.\")\n return\n for dependency in dependencies:\n if 'target_path' in dependency and 'repository' in dependency:\n print(\"Validated {}\".format(dependency['target_path']))\n suggest_edits(dependency)\n elif 'target_path' not in dependency and 'repository' in dependency:\n print(\"Define target_path for dependency {}\".format(dependency['repository']))\n elif 'repository' not in dependency and 'target_path' in dependency:\n print(\"Define repository for dependency {}\".format(dependency['target_path']))\n else:\n print(\"Invalid format, missing repository and target_path for dependency {}\".format(dependencies.index(dependency)))", "def populate_lexical():\n in_dir = os.path.join(buildconfig.FORM_INDEX_DIR, 'refined')\n frequency_cutoff = buildconfig.FREQUENCY_CUTOFF\n\n taxonomy = LanguageTaxonomy()\n lemma_counter = 0\n definition_counter = 0\n\n for letter in string.ascii_lowercase:\n stdout.write('Inserting data for %s...\\n' % letter)\n blocks = []\n in_file = os.path.join(in_dir, letter + '.json')\n with open(in_file, 'r') as filehandle:\n for line in filehandle:\n data = json.loads(line.strip())\n blocks.append(BlockData(*data))\n\n lemmas = []\n wordforms = []\n definitions = []\n for i, block in enumerate(blocks):\n lang_node = taxonomy.node(language=block.language)\n if lang_node is None:\n language_id = None\n else:\n language_id = lang_node.id\n\n if block.definition and block.f2000 < frequency_cutoff:\n definition_counter += 1\n definitions.append(Definition(id=definition_counter,\n text=block.definition[:100]))\n definition_id = definition_counter\n else:\n definition_id = None\n\n lemma_counter += 1\n lemmas.append(Lemma(id=lemma_counter,\n lemma=block.lemma,\n sort=block.sort,\n wordclass=block.wordclass,\n firstyear=block.start,\n lastyear=block.end,\n refentry=block.refentry,\n refid=block.refid,\n thesaurus_id=block.htlink,\n language_id=language_id,\n definition_id=definition_id,\n f2000=_rounder(block.f2000),\n f1950=_rounder(block.f1950),\n f1900=_rounder(block.f1900),\n f1850=_rounder(block.f1850),\n f1800=_rounder(block.f1800),\n f1750=_rounder(block.f1750),))\n\n for typelist in (block.standard_types,\n block.variant_types,\n block.alien_types):\n for typeunit in typelist:\n wordforms.append(Wordform(sort=typeunit[0],\n wordform=typeunit[1],\n wordclass=typeunit[2],\n lemma_id=lemma_counter,\n f2000=_rounder(typeunit[4]),\n f1900=_rounder(typeunit[5]),\n f1800=_rounder(typeunit[6]),))\n\n if i % 1000 == 0:\n Definition.objects.bulk_create(definitions)\n Lemma.objects.bulk_create(lemmas)\n Wordform.objects.bulk_create(wordforms)\n definitions = []\n lemmas = []\n wordforms = []\n\n Definition.objects.bulk_create(definitions)\n Lemma.objects.bulk_create(lemmas)\n Wordform.objects.bulk_create(wordforms)", "def getChecks(self):\r\n raise AbstractError\r\n return []", "def 
get_all_relaxed_candidates_after_generation(self, gen):\n q = 'relaxed=1,extinct=0,generation<={0}'\n entries = self.c.select(q.format(gen))\n\n trajs = []\n for v in entries:\n t = self.get_atoms(id=v.id)\n t.info['confid'] = v.gaid\n t.info['relax_id'] = v.id\n trajs.append(t)\n trajs.sort(key=lambda x: get_raw_score(x),\n reverse=True)\n return trajs" ]
[ "0.69321376", "0.6514944", "0.57968", "0.5703347", "0.5684623", "0.5570689", "0.54497045", "0.53985465", "0.5392003", "0.5353222", "0.53327596", "0.52788645", "0.525477", "0.52528656", "0.5236998", "0.51388603", "0.49939647", "0.49732646", "0.4960888", "0.49380273", "0.48753262", "0.48728484", "0.4861302", "0.48188752", "0.47884384", "0.47407892", "0.47406742", "0.47404498", "0.47338605", "0.47313946", "0.4715155", "0.47123975", "0.4698403", "0.4697843", "0.4696087", "0.46957964", "0.46953154", "0.46946704", "0.4682991", "0.4677027", "0.46753502", "0.46704525", "0.46666133", "0.46565804", "0.46543464", "0.46539664", "0.46491423", "0.46209693", "0.4619807", "0.4589736", "0.45856437", "0.456377", "0.45616168", "0.45527118", "0.4550333", "0.45372045", "0.45067814", "0.45052773", "0.4500371", "0.44792753", "0.4474925", "0.44738615", "0.44725853", "0.44725853", "0.4466693", "0.4462817", "0.445399", "0.44502738", "0.44502738", "0.4446742", "0.44421113", "0.44419235", "0.4425579", "0.4412955", "0.44075367", "0.4406624", "0.43995237", "0.43971425", "0.43947482", "0.438944", "0.4375336", "0.43724695", "0.437204", "0.4371903", "0.4368499", "0.43678486", "0.43677548", "0.43668982", "0.43454027", "0.43410078", "0.43396497", "0.43342727", "0.43331215", "0.4323538", "0.4321125", "0.43126318", "0.43113416", "0.43108147", "0.43106082", "0.4309246" ]
0.46405792
47
Returns a list formed by all the analyses we need to calculate, without repetition, from the data in parameters.
def get_query_and_evaluation_analysis_types(self, parameters): queries = parameters["clustering"]["evaluation"]["query_types"] queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters)) return list(set(queries))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_analysis_list(self):\n analysys_list = []\n\n analysis_types = AnalysisPopulator.get_query_and_evaluation_analysis_types(self.parameters)\n\n for analysis_type in analysis_types:\n if analysis_type in self.all_possible_analysis:\n analysys_list.append(self.all_possible_analysis[analysis_type])\n else:\n print \"[WARNING]\", analysis_type, \"is not an allowed analysis type\"\n\n return analysys_list", "def variables_used (self) :\r\n\t\treturn [i[0] for i in self.parameters]", "def parameters(self):\n return []", "def get_evaluation_analysis_types(self, parameters):\n eval_types =[]\n for evaluation_criteria_id in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"]:\n# for subcriteria in parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id]:\n# eval_types.append(subcriteria)\n eval_types.extend(parameters[\"clustering\"][\"evaluation\"][\"evaluation_criteria\"][evaluation_criteria_id].keys())\n return list(set(eval_types))", "def analyze():\n\n\t# Analyze the available data\n\tcoins = ''\n\tparams = []\n\tdone = False\n\twhile done != True:\n\t\tS_0_dat, K_dat, V_dat, T, coin = analysis.load()\n\t\ttheta, T = analysis.LM(S_0_dat, K_dat, V_dat, T)\n\t\tparams.append([theta, T, coin])\n\t\tif coins == '':\n\t\t\tcoins = coin\n\t\telse:\n\t\t\tcoins += ', ' + coin + '.'\n\t\ttry:\n\t\t\tprint(\"Current coins analyzed:\", coins)\n\t\t\tinp = input(\"Analyze another dataset? (y/n)\t\")\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tcontinue\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"\\nFinal parameters:\", params)\n\t\t\t\tdone = True\n\t\texcept ValueError:\n\t\t\tprint(\"\\nUnable to interpret input. Please try again.\")\n\treturn params", "def algorithm(df, params):\n\n output = {}\n\n # PUT YOUR OWN IMPLEMENTATION HERE\n # STORE YOUR ANALYSIS OUTPUT IN OUTPUT\n\n return output", "def get_so_results(self, save=False):\n #Read through output files\n parameters=self.get_optimization_parameters(friendly=True)\n parameterRange = range(len(parameters))\n\n results = []\n\n for i in parameterRange:\n result = {\n 'name': parameters[i][0],\n 'max_result': '?',\n 'max_evals' : '?',\n 'max_cpu' : '?',\n 'min_result' : '?',\n 'min_evals' : '?',\n 'min_cpu' : '?',\n }\n #Read min and max files\n for max in [0, 1]:\n iterator = 0\n \n try:\n file = open(os.path.join(self.path, 'output_1.%d.txt' % (2*i + max)),'r')\n output=[None for r in range(4)]\n for f in file.readlines():\n value = f.rstrip('\\n') #Read the file line by line.\n #Line 0: seperator. Line 1: Evals. Line 2: Time. 
Line 3: result\n index=parameterRange.index(i)\n output[iterator] = value\n iterator = (iterator + 1)%4\n file.close()\n evals = output[1].split(' ')[2]\n cpu_time = output[2].split(' ')[2]\n sens_result = output[3]\n \n if max == 0:\n max_str = 'max'\n else:\n max_str = 'min'\n result[max_str + '_result'] = sens_result\n result[max_str + '_cpu'] = cpu_time\n result[max_str + '_evals'] = evals\n \n except:\n raise\n \n results.append(result)\n \n #Finally, if save==True, write these results to file results.txt\n if save:\n if not os.path.isfile(os.path.join(self.path, 'results.txt')):\n results_file = open(os.path.join(self.path, 'results.txt'), 'w')\n header_line = 'Parameter name\\tMin result\\tMax result\\tMin CPU time\\tMin Evals\\tMax CPU time\\tMax Evals\\n'\n results_file.write(header_line)\n for result in results:\n result_line = result['name'] + '\\t' + result['min_result'] + '\\t' + result['max_result'] + '\\t' + result['min_cpu'] + '\\t' + result['min_evals'] + '\\t' + result['max_cpu'] + '\\t' + result['max_evals'] + '\\n'\n results_file.write(result_line)\n results_file.close()\n return results", "def _build_parsed_values(self):\n\n results = []\n\n # Process each of the instrument particle parameters\n for (name, encoding) in INSTRUMENT_PARTICLE_ENCODING_RULES:\n results.append(self._encode_value(name, self.raw_data.group(name), encoding))\n\n # # Set the internal timestamp\n internal_timestamp_unix = numpy.float(self.raw_data.group(\n DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP))\n self.set_internal_timestamp(unix_time=internal_timestamp_unix)\n\n return results", "def _get_parameters(self) -> list:\n return self.parameters", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n for i in range(len(pacf)):\n pacf[i] = abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def params(self):\n return [p for sublist in [o.params for o in self.obs] for p in sublist]", "def get_params(self):\n return []", "def get_data(self): \n self.improvement = []\n self.corrsq = []\n for filename in onlyfiles:\n mst = MST(filename, mypath=mypath)\n mst.estimate_correct_seqences()\n mst.estimate_improvement()\n self.mst.append(mst)\n\n self.corrsq.append(mst.corrsq)\n self.improvement.append(mst.improvement)\n\n\n\n print(f\"cor = {improvement}\")\n print(f\"improvement = {improvement}\")\n print(f\"mittelwert der improvement = {np.mean(improvement)}\")\n print(f\"Standardabweichung der lersteigung = {np.std(improvement)}\")", "def totalFitness(self):\n result = []\n for i in self.populasi:\n 
result.append(self.fungsi(*self.dekodeKromosom(i)))\n return result", "def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res", "def get_parameters_with_expert_knowledge(self) -> List[str]:\n return sorted(list(set([p for p, _ in self.expert_knowledge])))", "def generate_parameter_list(self) -> None:\n\n # simulation parameters from model\n model_parameter_ids = np.array(self.amici_model.getParameterIds())\n write_string_array(self.f, \"/parameters/modelParameterNames\",\n model_parameter_ids)\n print(Fore.CYAN + \"Number of model parameters:\",\n len(model_parameter_ids))\n\n print(Fore.CYAN + \"Number of optimization parameters:\",\n len(self.parameter_df))\n write_string_array(self.f, \"/parameters/parameterNames\",\n self.parameter_df.index.values[\n (self.parameter_df.estimate == 1)\n & ~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())])\n\n self.generate_simulation_to_optimization_parameter_mapping()\n\n self.f.flush()", "def science_parameter_list(cls):\n result = []\n for key in cls.list():\n if key not in GliderParticleKey.list():\n result.append(key)\n\n return result", "def getResults():\n return declList", "def computeAnsSets(self, param_dict):\n self.writeSolverConfigFile(param_dict)\n # run the process\n process = subprocess.Popen(self.bash_cmd.split(), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n return self.parseGeneratedProblems(output)", "def getResults():", "def _build_parsed_values(self):\r\n\r\n results = []\r\n\r\n # Process each of the instrument particle parameters\r\n for (name, index, encoding) in INSTRUMENT_PARTICLE_ENCODING_RULES:\r\n\r\n results.append(self._encode_value(name, self.raw_data.group(index), encoding))\r\n\r\n port_timestamp_unix = numpy.float(self.raw_data.group(\r\n DataMatchesGroupNumber.PROFILER_TIMESTAMP))\r\n self.set_port_timestamp(unix_time=float(port_timestamp_unix))\r\n\r\n self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.PORT_TIMESTAMP\r\n\r\n timestamp_str= self.raw_data.group(DataMatchesGroupNumber.DATE) + \" \" + self.raw_data.group(DataMatchesGroupNumber.TIME)\r\n self.set_internal_timestamp(timestamp=timestamp_mmddyyhhmmss_to_ntp(timestamp_str))\r\n\r\n return results", "def _get_all_data(self, start_date, end_date):\n return [self._prep_data(self._get_input_data(var, start_date,\n end_date),\n self.var.func_input_dtype)\n for n, var in enumerate(self.variables)]", "def parameter_space():\n return [list(range(7, 17)),\n list(range(17, 27)),\n list(range(27, 37)),\n list(permutations(range(1, 5), 4))]", "def movies(self):\n return self.data.groupby('Parameters')", "def get_parameters(self):\n run_parameters = []\n max_clusters = self.parameters[\"clustering\"][\"evaluation\"][\"maximum_clusters\"]\n min_clusters = self.parameters[\"clustering\"][\"evaluation\"][\"minimum_clusters\"]\n hierarchicalAlgorithm = HierarchicalClusteringAlgorithm(self.distance_matrix)\n clusters_and_cutoff = hierarchicalTools.get_clusters_with_ranged_search(\n hierarchicalAlgorithm,\n 0.,\n self.distance_matrix.calculateMean(),\n min_clusters,\n max_clusters,\n ParametersGenerator.HIERARCHICAL_REFINEMENT_VALUE)\n clusterings = []\n cutoffs = []\n for numclusters in clusters_and_cutoff:\n clusterings.append(clusters_and_cutoff[numclusters][1])\n cutoffs.append(clusters_and_cutoff[numclusters][0])\n\n for cutoff in cutoffs:\n run_parameter 
= ParametersGenerator.get_base_parameters()\n run_parameter[\"method\"] = 'complete'\n run_parameter[\"cutoff\"] = cutoff\n run_parameters.append(run_parameter)\n\n return run_parameters, clusterings", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def _get_params(self, fluents):\n objects_all = set()\n for fluent in fluents:\n objects = fluent.replace(\"(\",\"\").replace(\")\",\"\").split(\" \")[1:]\n objects_all.update(objects)\n\n return objects_all", "def showAll(self):\n (filteredInterpreters, filteredLanguages) = (self.getFilteredInterpreters(self.vertices), self.getFilteredLanguages(self.vertices))\n output = '--------Function showAll--------'\n output += '\\n\\nTotal no. of candidates: {totalCandidates}\\nTotal no. of languages: {totalLangugages}'.format(totalCandidates = len(filteredInterpreters), totalLangugages = len(filteredLanguages))\n output += '\\n\\nList of candidates:'\n for candidate in filteredInterpreters:\n output += '\\n\\n'+ candidate['value'].title()\n output += '\\n\\n\\nList of languages:'\n for language in filteredLanguages:\n output += '\\n\\n'+ language['value'].title()\n analysisOutput = '\\n\\n--------Function showAll--------\\n\\nWorst Complexity will be O(n) where n is the total no of nodes\\n\\n 3 iterations were executed twice to filter data for language and interpreters and third loop for creating output which was divided as a sum of no interpreters and the languages that can be spoken'\n analysisOutput += '\\n\\nHence, Runtime Complexity here will be = {n}'.format(n=len(self.vertices))\n self.printOutput(output)\n self.printAnalysis(analysisOutput)", "def hetaira_results(self):\n \n if self.descriptors is not None:\n results = [[str(self.items[i]), str(self.ivalue(i)),\n str(self.jvalue(i))] for i in range(len(self.items))]\n else:\n results = [[str(self.items[i]), str(self.ivalue(i))]\n for i in range(len(self.items))]\n\n results.append(['dset', str(self.dset)])\n return results", "def __benchmark__(cls):\n results = []\n used_argsets = []\n for args in cls.arguments:\n used_argsets.append(args)\n # for each given argument\n for method in cls.get_methods():\n # append an empty list for the results with this argument\n method_results = []\n # for each repetition\n for n in xrange(cls.repetitions):\n # append the results to the list for this argument set\n trial_results = cls._trial(method, args)\n method_results.append(trial_results)\n # append a Result to `results`.\n results.append(Result(method, args, method_results))\n return results, used_argsets", "def _get_aggregated_results(self):\n gradients = self.gradients\n client_traj_infos = flatten_lists(self.client_traj_infos)\n client_opt_infos = self._combine_client_opt_infos(self.client_opt_infos)\n \n self.gradients = []\n self.client_traj_infos = []\n self.client_opt_infos = []\n\n return gradients, client_traj_infos, client_opt_infos", "def get_analytically_computed_optimization_parameter_indices(self):\n indices = []\n if '/offsetParameterIndices' in self.f:\n indices.extend(self.f['/offsetParameterIndices'])\n\n if '/scalingParameterIndices' in self.f:\n indices.extend(self.f['/scalingParameterIndices'])\n\n if '/sigmaParameterIndices' in self.f:\n indices.extend(self.f['/sigmaParameterIndices'])\n\n return 
list(set(indices))", "def parameters(self):\n return [o.parameters for o in self.obs]", "def statistify(criteria):\n final = []\n for degree in criteria.keys():\n if degree == 'total':\n continue\n for num in range(0,criteria[degree]):\n final.append(int(degree.split('degree')[1]))\n return final", "def get_results(self):\n result = [round(self.mr / self.test_size, 1), round(self.mrr / self.test_size, 3),\n round(self.hits1 / self.test_size, 3), round(self.hits3 / self.test_size, 3),\n round(self.hits5 / self.test_size, 3), round(self.hits10 / self.test_size, 3)]\n return result", "def parameters(self):\n return [term.parameter for term in self.terms]", "def get_parameter_values(self):\n obsPars = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n obsPars[i] = p.read_value_in_fmu(self.fmu)\n i += 1\n return obsPars", "def param(self):\n return []", "def param(self):\n return []", "def get_all_variables(self):\n return []", "def get_pars(self):\n return [self.z, self.b, self.logN]", "def reduce_data(self, ctx):\n self.baselines_type = ctx.get(\"baselines_type\")\n visibilities = ctx.get(\"visibilities\")\n p_signal = self.compute_power(visibilities)\n\n # Remember that the results of \"simulate\" can be used in two places: (i) the computeLikelihood method, and (ii)\n # as data saved to file. In case of the latter, it is useful to save extra variables to the dictionary to be\n # looked at for diagnosis, even though they are not required in computeLikelihood().\n return [dict(p_signal=p_signal, baselines=self.baselines, frequencies=self.frequencies,\n u=self.u, eta=self.eta)]\n #, nbl_uv=self.nbl_uv, nbl_uvnu=self.nbl_uvnu, nbl_u=self.nbl_u, grid_weights=self.grid_weights)]", "def formatdata(data,Params):\n\tmndata = dict()\n\talltrials = np.array([])\n\tfor k in range(len(Params[\"conditions\"])):\n\t\tconditionmean = data[0,k].mean(axis = 0)\n\t\tmndata.update({Params[\"conditions\"][k]: {'data' : data[0,k].mean(axis = 0), 'cmax' : conditionmean.max(), 'cmin' : conditionmean.min()}})\n\treturn mndata", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameters(self):\n params = []\n for m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n # we do not get param of output module\n l = list(m.parameters())\n params.extend(l)\n\n one_dim = [p.view(p.numel()) for p in params]\n params = F.torch.cat(one_dim)\n return params", "def getParameterList(self):\n inputList = []\n for name, n in zip(self._names, self._inputs):\n inputList += ['%s.x%d' % (name, i) for i in range(n)]\n return inputList", "def parameters(self):\n return self.pars", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def get_required_parameters(self) -> list:\n results = []\n if self.no_params or self.params_optional:\n return []\n else:\n for parameter, parameter_details in self.parameters.items():\n # Fixing issue #92\n # if parameter == \"effect\":\n # continue\n if not parameter_details.default_value:\n results.append(parameter_details.name)\n return results", "def param(self):\r\n\r\n return []", "def _fit_params(cls) -> List[str]:\n fsigs = set()\n for series_def in cls.__series__:\n fsigs.add(inspect.signature(series_def.fit_func))\n if len(fsigs) > 1:\n raise AnalysisError(\n \"Fit functions specified in the series definition have \"\n \"different function signature. They should receive \"\n \"the same parameter set for multi-objective function fit.\"\n )\n\n # remove the first function argument. this is usually x, i.e. not a fit parameter.\n fit_params = list(list(fsigs)[0].parameters.keys())[1:]\n\n # remove fixed parameters\n if cls.__fixed_parameters__ is not None:\n for fixed_param in cls.__fixed_parameters__:\n try:\n fit_params.remove(fixed_param)\n except ValueError as ex:\n raise AnalysisError(\n f\"Defined fixed parameter {fixed_param} is not a fit function argument.\"\n \"Update series definition to ensure the parameter name is defined with \"\n f\"fit functions. Currently available parameters are {fit_params}.\"\n ) from ex\n\n return fit_params", "def parameters(self):\n #print \"in instrument.parameter()\"\n return self._params", "def curate_filter_info(self):\n filter_list = [\n self.sample_name, self.final_id, self.all_variant_count,\n self.filter_min_depth_count, self.filter_max_depth_count,\n self.filter_common_var_count, self.log_mut_count,\n self.cosmic_variant_counts, self.unknown_maf_count\n ]\n return filter_list", "def _analyseVariables(self):\n self.unused_vars = []\n ffis_limited = False\n\n highest_rank = -1\n best_var = None\n count = 0\n\n # Need to get highest ranked variable (most dimensions) so that we can work out FFI\n for var in self.vars:\n msg = f\"Analysing: {var.name}\"\n self.output_message.append(msg)\n count = count + 1\n\n # get rank\n rank = len(var.shape)\n\n # Deal with singleton variables\n if rank == 0: \n self.rank_zero_vars.append(var)\n self.rank_zero_var_ids.append(var.name)\n continue\n\n # Update highest if highest found or if equals highest with bigger size\n try:\n var.size = var.size()\n best_var.size = best_var.size()\n except:\n pass\n\n if rank > highest_rank or (rank == highest_rank and var.size > best_var.size):\n highest_rank = rank\n best_var = var\n best_var_index = count - 1\n\n # If all are zero ranked variables or no vars identified/found then we cannot write any to NASA Ames and return ([], [])\n if len(self.rank_zero_vars) == len(self.vars) or best_var is None: \n return ([], [])\n\n # Now start to sort the variables into main and auxiliary \n vars_for_na = [best_var]\n aux_vars_for_na = []\n shape = best_var.shape\n number_of_dims = len(shape)\n self.na_dict[\"NIV\"] = number_of_dims\n\n # If 2D then do a quick test to see if 2310 is feasible (i.e. 
uniformly spaced 2nd axis)\n if number_of_dims == 2:\n\n ffis_limited = [2010, 2110]\n axis = xarray_utils.get_coord_by_index(best_var, 1)\n\n if xarray_utils.isUniformlySpaced(axis):\n ffis_limited.append(2310)\n\n # Get the axes for the main variable being used\n best_var_axes = xarray_utils.getAxisList(best_var)\n \n # Get other variables into a list and analyse them\n rest_of_the_vars = self.vars[:best_var_index] + self.vars[(best_var_index + 1):]\n\n for var in rest_of_the_vars:\n\n if var.name in self.rank_zero_var_ids: continue\n\n # What to do with variables that have different number of dimensions or different shape\n if len(var.shape) != number_of_dims or var.shape != shape: \n # Could it be an auxiliary variable?\n if len(var.shape) != 1: \n self.unused_vars.append(var)\n continue\n\n first_axis = xarray_utils.get_coord_by_index(var, 0)\n # Check if axis is identical to first axis of main best variable, if so, can be auxiliary var\n if not xarray_utils.areAxesIdentical(best_var_axes[0], first_axis):\n\n # If not identical, then it might still qualify as an auxiliary every n time points - valid for 1020\n if len(var.shape) == 1:\n nvpm = xarray_utils.isAxisRegularlySpacedSubsetOf(first_axis, best_var_axes[0])\n\n # NVPM is the number of implied values which is equal to (len(ax2)/len(ax1))\n if nvpm:\n ffis_limited = [1020]\n self.na_dict[\"NVPM\"] = nvpm\n else: # if returned False, i.e. not regular subset axis\n self.unused_vars.append(var)\n\n else:\n self.unused_vars.append(var)\n continue\n\n else:\n # This could be used as a standard auxiliary variable\n if ffis_limited in ([1020],):\n # Already fixed on 1020 and cannot collect incompatible FFI vars so do not use\n self.unused_vars.append(var)\n else:\n aux_vars_for_na.append(var) \n\n else:\n this_var_axes = xarray_utils.getAxisList(var)\n\n # Loop through dimensions\n for i in range(number_of_dims): \n\n if not xarray_utils.areAxesIdentical(best_var_axes[i], this_var_axes[i]):\n self.unused_vars.append(var)\n break\n else:\n # OK, I think the current variable is compatible to write with the best variable along with a NASA Ames file \n vars_for_na.append(var)\n\n # Send vars_for_na AND aux_vars_for_na to a method to check if they have previously been mapped \n # from NASA Ames. 
In which case we'll write them back in the order they were initially read from the input file.\n (vars_for_na, aux_vars_for_na) = \\\n self._reorderVarsIfPreviouslyNA(vars_for_na, aux_vars_for_na)\n\n # Get the FFI\n self.na_dict[\"FFI\"] = \\\n self._decideFileFormatIndex(number_of_dims, aux_vars_for_na, ffis_limited)\n\n return vars_for_na, aux_vars_for_na", "def mes_fidelity_helper(X, parameters, output_file):\n\n pop_fractions = output_file['Population_Fraction']\n iterations = output_file['Iterations']\n\n chosen_fidelity = X[:, parameters['Dimension'][0]:]\n chosen_fidelity = np.sum(chosen_fidelity, axis=1).tolist()\n\n parameters['Population_Fraction'] = \\\n [pop_fractions[int(idx)] for idx in chosen_fidelity]\n parameters['Iterations'] = \\\n [iterations[int(idx)] for idx in chosen_fidelity]\n\n parameters = create_parameters(X, parameters)\n\n return parameters", "def param(self):\r\n return []", "def processParam(self,paramInfo):\n modelicaParam = []\n for eachline in paramInfo:\n eachline = eachline.split('.param')\n #Include ',' in between parameter\n #Removing leading and trailing space\n line = eachline[1].strip()\n line = line.split()\n final_line = ','.join(line)\n stat = 'parameter Real ' + final_line + ';'\n stat = stat.translate(maketrans('{}', ' '))\n modelicaParam.append(stat)\n return modelicaParam", "def classifyParameters(self):\n\n arguments = []\n options = []\n outputs = []\n for parameter in self.parameters():\n if parameter.channel == 'output' and not (\n parameter.isExternalType() or parameter.typ == 'file'):\n outputs.append(parameter)\n elif parameter.index is not None:\n arguments.append(parameter)\n if parameter.flag is not None or parameter.longflag is not None:\n logger.warning(\"Parameter %s has both index=%d and flag set.\" % (\n parameter.identifier(), parameter.index))\n else:\n options.append(parameter)\n arguments.sort(key = lambda parameter: parameter.index)\n return (arguments, options, outputs)", "def X(self)->list:", "def parameter_optimization(self):\n out = open(self.csv_dir + self.strategy_id + '_gridsearch.csv', \"w\")\n spl = len(self.para_list)\n for i, sp in enumerate(self.para_list):\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self.portfolio.get_statistics()\n tot_profit = float(stats[0][1])\n sharpe = float(stats[1][1])\n max_dd = float(stats[2][1])\n win_rate = float(stats[7][1].replace(\"%\", \"\"))\n profit_factor = float(stats[8][1])\n\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" %\n (sp[\"takeprofit\"], sp[\"period\"], tot_profit, sharpe, max_dd, win_rate, profit_factor)\n )\n out.close()", "def get_params_as_list(self):\n\n\t\tparams = [self.shape_slope, self.z_thick, self.thick, self.length]\n\t\treturn params", "def variables_used (self) :\r\n\t\t## These names do not contain dimension specification (everything in brackets\r\n\t\t## that comes after a name is am array index - either the arry was declared\r\n\t\t## correctly or it is wrong anyway, there is no implicit declaration of arrays) !\r\n\r\n\t\tresult = []\r\n\r\n\t\tfor l in self.equ_lists :\r\n\t\t\tfor var_name in l :\r\n\t\t\t\tresult.append(var_name[0])\r\n\t\treturn result", "def analyse(self):\n pass", "def _build_parsed_values(self):\n\n # Set the base metadata parsed values to the results to return\n results = self._build_metadata_parsed_values()\n\n data_match = self.raw_data[MetadataRawDataKey.DATA_MATCH]\n\n # Process each of the non common metadata particle parameters\n for 
(name, encoding) in NON_COMMON_METADATA_PARTICLE_ENCODING_RULES:\n results.append(self._encode_value(name, data_match.group(name), encoding))\n\n # Set the internal timestamp\n internal_timestamp_unix = numpy.float(data_match.group(\n DostaAbcdjmCsppParserDataParticleKey.PROFILER_TIMESTAMP))\n self.set_internal_timestamp(unix_time=internal_timestamp_unix)\n\n return results", "def get_parList(self):\n parList = []\n for modelName in self._modelList:\n model = self.__modelDict[modelName]\n modelParDict = model.parFitDict\n for parName in modelParDict.keys():\n parList.append(modelParDict[parName][\"value\"])\n return parList", "def prms(widget: QWidget) -> List:\n parameters = BaseTrain.prms(widget)\n return parameters", "def get_measures(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[2])\n return result", "def parameters(self):", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def get_all_variables(self):\n out = []\n for i in self.items:\n out += i.get_all_variables()\n return out", "def extract_parameters(self) -> Dict[str, Set[str]]:\n regex = \"\\{([A-Za-z0-9_]+)\\}\"\n reserved_parameters = [\n \"output\",\n \"input\",\n \"output_vec\",\n \"input_vec\",\n \"df\",\n \"vec_open\",\n \"vec_close\",\n ]\n parameters = {}\n for scope in self.scopes:\n parameters[scope] = set(\n [\n x\n for x in re.findall(regex, self.call)\n if x not in reserved_parameters\n ]\n )\n return parameters", "def analyze(data):\n ## Do welch periodogram here\n pass", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def config_params1(parameter):\n\n p = parameter['p']\n q = parameter['q']\n d = parameter['d']\n m = parameter['m']\n pdq_m = list(itertools.product(p, d, q,m)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def _build_parsed_values(self):\n # need to exclude sci times\n return self._parsed_values(EngineeringTelemeteredDataParticle.keys_exclude_sci_times)", "def _build_parsed_values(self):\n # need to exclude sci times\n return self._parsed_values(EngineeringRecoveredDataParticle.keys_exclude_sci_times)", "def _get_params(self):\r\n 
return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale,self.period))", "def _build_parsed_values(self):\n # need to exclude m times\n return self._parsed_values(EngineeringScienceRecoveredDataParticle.keys_exclude_times)", "def get_parameter_estimation_parameters(self, friendly=True):\n #Get the sensitivities task:\n fitTask=self._getTask('parameterFitting')\n fitProblem = fitTask.find(xmlns + 'Problem')\n optimizationItems = fitProblem.find(xmlns + 'ParameterGroup')\n parameters = []\n for subGroup in optimizationItems:\n name = None\n lowerBound = None\n upperBound = None\n startValue = None\n \n for item in subGroup:\n if item.attrib['name'] == 'ObjectCN':\n name = item.attrib['value']\n elif item.attrib['name'] == 'UpperBound':\n upperBound = item.attrib['value']\n elif item.attrib['name'] == 'LowerBound':\n lowerBound = item.attrib['value']\n elif item.attrib['name'] == 'StartValue':\n startValue = item.attrib['value']\n assert name !=None\n assert lowerBound != None\n assert upperBound != None\n assert startValue != None\n \n if friendly:\n #Construct a user-friendly name for the parameter name using regexs\n #Look for a match for global parameters: Vector=Values[Test parameter],\n global_string = r'.*Vector=Values\\[(?P<name>.*)\\].*'\n global_string_re = re.compile(global_string)\n global_match = re.match(global_string_re, name)\n \n if global_match:\n name = global_match.group('name')\n \n #else check for a local match.\n #Vector=Reactions[Reaction] Parameter=k1\n local_string = r'.*Vector=Reactions\\[(?P<reaction>.*)\\].*Parameter=(?P<parameter>.*),Reference=Value.*'\n local_string_re = re.compile(local_string)\n local_match = re.match(local_string_re, name)\n \n if local_match:\n reaction = local_match.group('reaction')\n parameter = local_match.group('parameter')\n name = '(%s).%s'%(reaction, parameter)\n\n parameters.append((name, lowerBound, upperBound, startValue))\n\n return parameters", "def analyse(self, data=None):\n pass", "def all_numeric_accumulations(request):\n return request.param", "def get_visitor_analysis(self,params=['2017-7-13','2017-7-15','www.att.com']):\n result =[]\n res_from_query1 = []\n\n query_params = {'domain_name':params[2],'start_date':params[0],'end_date':params[1]}\n logging.debug(\"Query params are: end date {} and start date {} for domain {}\".format(params[1],params[0],params[2]))\n query1 = \"\"\"select\n A.pt_log_date,\n TO_CHAR(count(*),'fm999999999.00') as total,\n ROUND(count(A.adobe_visid_high_low),2) as mcVisIdHigh,\n count(case when D.parameter_value = '0' then null else 1 end) as mcVisIdHigh,\n count(B.parameter_value) as uuid,\n count(case when instr(B.parameter_value,'-') > 0 then null else B.parameter_value end) as auth_uuid\n from wt_logs A left outer join wt_log_parts B on A.key = B.key and A.distribution_key = B.distribution_key and B.parameter_name = 'prop48' and B.pt_log_date between :start_date and :end_date\n left outer join wt_log_parts D on A.key = D.key and A.distribution_key = D.distribution_key and D.parameter_name = 'mcVisIdHigh' and D.pt_log_date between :start_date and :end_date\n where A.pt_log_date between :start_date and :end_date\n and A.domain_name = :domain_name\n group by A.pt_log_date\"\"\".replace('\\n',' ')\n logging.info(\"The first query is {}\".format(query1))\n with vertica_python.connect(**conn_info) as connection:\n cur = 
connection.cursor()\n cur.execute(query1,query_params)\n for row in cur.iterate():\n res_from_query1.append(row)\n\n return(res_from_query1)", "def getResultAll(i=None):", "def evaluate(self, parameters):\n [f, df] = self.linear_model(parameters)\n return OrderedDict([('f', f-self.data_f), ('df', df-self.data_df)])", "def get_analytical_parameter_table(\n self,\n hierarchical_candidate_ids: list,\n parameter_type: str) -> list:\n\n condition_id_to_index = {name: idx for idx, name in\n enumerate(self.condition_ids)}\n # need list, not ndarray\n condition_map_list = [list(x) for x in self.condition_map]\n\n use = []\n for index, row in self.measurement_df.iterrows():\n # TODO : must handle sigmas separately\n if parameter_type == 'observable':\n overrides = petab.split_parameter_replacement_list(\n row.observableParameters)\n elif parameter_type == 'noise':\n overrides = petab.split_parameter_replacement_list(\n row.noiseParameters)\n else:\n raise ValueError(\n \"type must be noise or observable, but got\" + parameter_type)\n\n sim_cond_idx = \\\n condition_id_to_index[row.simulationConditionId]\n preeq_cond_idx = self.NO_PREEQ_CONDITION_IDX\n if not np.isnan(row.preequilibrationConditionId):\n preeq_cond_idx = condition_id_to_index[\n row.preequilibrationConditionId]\n\n for s in overrides:\n #print(s, parametersForHierarchical)\n try:\n scalingIdx = hierarchical_candidate_ids.index(s)\n except ValueError:\n continue # current parameter not in list\n\n conditionIdx = condition_map_list.index(\n [preeq_cond_idx, sim_cond_idx])\n observableIdx = self.observable_ids.index(row.observableId)\n tup = (scalingIdx, conditionIdx, observableIdx)\n\n # Don't add a new line for each timepoint\n # We don't allow separate parameters for individual time-points\n # (Can be implemented via different observables)\n if not tup in use:\n use.append(tup)\n\n if not len(use):\n raise AssertionError(\"Candidates were: \" + str(hierarchical_candidate_ids) + \" but nothing usable found\")\n\n return use", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def compute_statistics(self):", "def getMeasures():", "def lego_sets():\n \n \n data_test=data_specific\n\n\n\n \n \n\n \n print(data_test)\n print(\"The size of the data is: \",len(data_test))\n \n \n \n # you must replace this line and return your own list\n return data_test", "def _build_parsed_values(self):\n # need to exclude m times\n return self._parsed_values(EngineeringScienceTelemeteredDataParticle.keys_exclude_times)", "def parameter_list(self):\n return [\n [encut, kpoint_mesh]\n for encut, kpoint_mesh in zip(\n self._job.iteration_frame.ENCUT, self._job.iteration_frame.KPOINT_MESH\n )\n ]", "def params(self):\n params = []\n\n for v in vars(self).values():\n params.extend(self.__computeParams(v))\n\n if isinstance(v, list):\n for p in v:\n params.extend(self.__computeParams(p))\n\n return params", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n 
self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')" ]
[ "0.62758255", "0.6011133", "0.5986083", "0.59858525", "0.5896374", "0.58353883", "0.58151305", "0.57914615", "0.57429975", "0.5728949", "0.571652", "0.57030505", "0.5637215", "0.5610428", "0.55925506", "0.559063", "0.5552268", "0.5549528", "0.5538644", "0.5536552", "0.55355585", "0.5533471", "0.553217", "0.552021", "0.55054367", "0.5495071", "0.5490576", "0.54834175", "0.5474648", "0.5459539", "0.54557353", "0.5441871", "0.5438413", "0.5432206", "0.5424039", "0.54140013", "0.54105157", "0.5410438", "0.5400653", "0.5398416", "0.5398416", "0.53980523", "0.53824043", "0.5380607", "0.5377599", "0.5377157", "0.5377157", "0.5373567", "0.53653973", "0.5361568", "0.5355108", "0.53504586", "0.5350315", "0.53487027", "0.5344209", "0.5342283", "0.53416526", "0.5335663", "0.5334152", "0.5330392", "0.5318386", "0.53163725", "0.53119683", "0.5311308", "0.53099865", "0.5300712", "0.5290973", "0.5290932", "0.52906805", "0.5288584", "0.5286454", "0.5286454", "0.5286454", "0.528501", "0.52828187", "0.527604", "0.52728236", "0.526726", "0.5256161", "0.5255839", "0.52547604", "0.5244705", "0.5244705", "0.5244705", "0.52413064", "0.5238148", "0.5221344", "0.5220702", "0.52155274", "0.52067006", "0.5204912", "0.5204108", "0.51914275", "0.51906705", "0.51899236", "0.51864463", "0.5184155", "0.5182448", "0.51750374", "0.5174901" ]
0.5309142
65
Returns a list formed by the evaluation types present in criteria.
def get_evaluation_analysis_types(self, parameters):
    eval_types = []
    for evaluation_criteria_id in parameters["clustering"]["evaluation"]["evaluation_criteria"]:
        # for subcriteria in parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id]:
        #     eval_types.append(subcriteria)
        eval_types.extend(parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id].keys())
    return list(set(eval_types))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_query_and_evaluation_analysis_types(self, parameters):\n queries = parameters[\"clustering\"][\"evaluation\"][\"query_types\"]\n queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters))\n return list(set(queries))", "def getResultDefs(self, type=None):\n results = self.results.values()\n\n if type:\n results = filter(lambda result: result.type == type, results)\n\n return results", "def getTypesList():\n return Gw2Spidy._request('types')['results']", "def report_type_choices():\n\n rts = report_types()\n rcs = report_categories()\n return [(c, [(rt.report_type, rt.name) for rt in rts if rt.category == c]) for c in rcs]", "def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result", "def getType(self, terms):\n\n\t\treturn [i for i in xrange(len(self.toTYPE)) if terms in self.toTYPE[i]]", "def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c", "def statistify(criteria):\n final = []\n for degree in criteria.keys():\n if degree == 'total':\n continue\n for num in range(0,criteria[degree]):\n final.append(int(degree.split('degree')[1]))\n return final", "def get_evaluators(categories):\n eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]\n evaluators_list = []\n for eval_metric_fn_key in eval_metric_fn_keys:\n evaluators_list.append(\n EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))\n return evaluators_list", "def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)", "def get_evaluators(eval_config, categories):\n eval_metric_fn_key = eval_config.metrics_set\n if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:\n raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))\n return [\n EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](\n categories=categories)\n ]", "def _inferred_type_levels(self) -> list[str]:\n return [i.inferred_type for i in self.levels]", "def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules", "def getTypes():\n\t\n\ttranslationTable = []\n\tfor x in typePrimitive:\n\t\ttranslationTable.extend(x[0])\n\t\n\tid = 0\n\ttypes = []\n\tmax = 0\n\tfor x in typePrimitive:\n\t\t\n\t\tbinds = []\n\t\tfor y in x[2]:\n\t\t\tbinds.append(translationTable.index(y))\n\t\t\n\t\tif (x[4] != False) and (x[4] > max):\n\t\t\tmax = x[4]\n\t\t\t\n\t\t\n\t\ttypes.append({'name':x[0],'nSites':x[1],'binds':binds,'sym':x[3],'id':id,'max':x[4]})\n\t\tid+=1\n\t\n\treturn (max,types)", "def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types", "def test_for_criteria(self):\n 
ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values", "def filter_criteria(self):\n return self.filter_nodes('//Validation/Criteria')", "def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))", "def load_criterias():\r\n l = [ (p.id, p.name) for p in StockProperty.objects.all() ]\r\n l.insert(0, ('', 'Select to add criteria ...'))\r\n return l", "def get_criteria(self):\n\n\t\treturn self.__criteria", "def items(self):\n return self._rules_by_lhs.items()", "def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]", "def get_check_types():", "def getCriteriaItems( self ):\n # filter out empty strings\n result = []\n\n value = tuple( filter( None, self.value ) )\n if not value:\n return ()\n result.append( ( self.field, self.value ), )\n\n if self.operator is not None:\n result.append( ( '%s_operator' % self.field, self.operator ) )\n\n return tuple( result )", "def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types", "def getTypes(quizScores):\n challenger = 0\n collaborator = 0\n communicator = 0\n contributor = 0\n for x in range(18):\n questionScore = quizScores[x]\n challenger += int(questionScore[0])\n collaborator += int(questionScore[1])\n communicator += int(questionScore[2])\n contributor += int(questionScore[3])\n return [ challenger, collaborator, communicator, contributor]", "def getProposalTypesVocab(self):\n list = DisplayList()\n # Acquire the types\n types = self.aq_inner.aq_parent.getProposalTypes()\n for type in types:\n list.add(type, type)\n return list", "def opinion_type_list():\n for type_ in orm.DataFlagOpinionType.select():\n click.echo(type_.name)", "def __get_condition_types(condition):\n branch_condition = condition.get(\"specification\")\n condition_type, condition_value = branch_condition.split(\".\")\n condition_types = condition_type.split(\" \")\n return condition_types", "def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")", "def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")", "def criteria(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Criterion]:", "def types(self) -> List[str]:\n return self._types", "def get_results(self, context):\n results = [x for x in self.select_results(context)]\n if len(results) == 1:\n res = results[0]\n if isinstance(res, (bool, int, float, Decimal)):\n return res\n elif isinstance(res, tuple) or is_etree_element(res) or is_document_node(res):\n return results\n elif is_schema_node(res):\n return results\n elif self.symbol in ('text', 'node'):\n return results\n elif self.label in ('function', 'literal'):\n return res\n else:\n return results\n else:\n return results", "def query_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/query-rule-types\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"query_rule_types\",\n keywords=kwargs,\n params=parameters\n )", "def getTypes():\n\n\t\tquery = \"\\\n\t\t\tSELECT\\\n\t\t\t\tid_item_container_type,\\\n\t\t\t\tlabel\\\n\t\t\tFROM\\\n\t\t\t\titem_container_type\\\n\t\t\"\n\n\t\treturn {t['id_item_container_type']: t['label'] for t in Model.fetchAllRows(query)}", "def 
get_evaluations(self):\r\n return self.evaluations", "def get_quality_checks(dict):\n\n quality_checks = {}\n for type in dict:\n list = []\n for qc in dict[type]:\n list.append(const.get_id(qc))\n quality_checks[type] = list\n\n return quality_checks", "def sorter(row):\n criteria = []\n for value in row[1]: # Ignore enumeration\n criteria.append(\n (\n value is not None,\n \"\" if isinstance(value, Number) else type(value).__name__,\n value,\n )\n )\n return criteria", "def search_functional_identifiers(self, criterion_type, criteria_list):\n check_type(value=criterion_type, allowed_types=str, var_name=\"criterion_type\", raise_exception=True)\n check_type(value=criteria_list, allowed_types=list, var_name=\"criteria_list\", raise_exception=True)\n\n my_filter = dict()\n my_filter[criterion_type] = criteria_list\n\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.POST,\n template=TEMPLATES['search_functional_identifier_list'],\n data=my_filter,\n files=None)\n check_http_code(response)\n\n return response.json", "def get_op_types_by_precision(self, precision):\n assert precision in list(self.cur_config['ops'].keys())\n\n return self.cur_config['ops'][precision]", "def _get_criteria(self):\n for molecule in self.values():\n molecule.get_criterion()", "def get_object_types(default_val=False):\n result = []\n if g.user:\n if default_val:\n result.append((\"1\", \"Please Select\"))\n\n for name, data in entity.Entity.__dict__.items():\n if not isinstance(data, str):\n continue\n if \"__\" in name[:2]:\n continue\n result.append((name, name))\n\n result.sort()\n\n return result", "def relevant_classifications(self):\n return self.relevant_classes", "def all_qtypes(cls):\n return sorted([subcls.QTYPE for subcls in all_subclasses(cls)])", "def listVerificationTypes(self):\n return self.get_json('/verificationType')", "def get_analysis_list(self):\n analysys_list = []\n\n analysis_types = AnalysisPopulator.get_query_and_evaluation_analysis_types(self.parameters)\n\n for analysis_type in analysis_types:\n if analysis_type in self.all_possible_analysis:\n analysys_list.append(self.all_possible_analysis[analysis_type])\n else:\n print \"[WARNING]\", analysis_type, \"is not an allowed analysis type\"\n\n return analysys_list", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def get_types(*args, **kwargs) -> list:\n arg_types = []\n for arg in args:\n arg_types.append(type(arg))\n for values in kwargs.values():\n arg_types.append(type(values))\n return arg_types", "def get_subterms(expr):\n av_expr = []\n expr_types = []\n if isinstance(expr, Term):\n if expr.subterms:\n for s in expr.subterms:\n new_av, new_type = get_subterms(s)\n av_expr += new_av\n expr_types += new_type\n new_type = expr.type\n expr_types.append(new_type)\n av_expr.append(expr)\n else:\n av_expr.append(expr)\n expr_types.append(expr.type)\n elif type(expr) != str:\n if expr.term:\n new_av, new_type = get_subterms(expr.term)\n av_expr += new_av\n expr_types += new_type\n return av_expr, expr_types", "def evaluators(self) -> List[Evaluator]:\n return self._evaluators", "def etypes(self): # -> list[str]:\n ...", "def evaluate(self, csls, evals, mode=\"csls\"):\n metrics = {}\n for eval_func in evals:\n assert hasattr(self, eval_func), \\\n \"Eval Function {0} not 
found\".format(eval_func)\n metrics = getattr(self, eval_func)(csls, metrics, mode=mode)\n return metrics", "def get_list(self):\n return self._FF_TYPES", "def get_list(self):\n return self._FF_TYPES", "def answer_types(cls, channel):\n return set(\n AnswerAccessDefinition.objects.filter(\n channel=channel).values_list(\n 'answer_type', flat=True))", "def list_formulae():\n return _list_tindyb_unique_values(\"formula\", dbpath=__dbpath__)", "def winners_per_type(self):\n winners = [winner[1] for winner in self.result]\n # making a list of the type of winners\n return Counter(winners)\n # Using the Counter tool from the standard library to count the\n # types in a dictionary", "def truth_values(formula, models):\r\n # Task 2.3\r\n list_of_truth_values = []\r\n for model in models:\r\n list_of_truth_values.append(evaluate(formula, model))\r\n return list_of_truth_values", "def types():\n sql = \"\"\"SELECT DISTINCT sample_type\n FROM barcodes.sample\n ORDER BY sample_type\"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return pm.sql.TRN.execute_fetchflatten()", "def getTypes(self):\n return self._doRequest(self.httpClient.getTypes)", "def get_types(self):\n return self.types", "def evaluate(self, context=None):\n return [x for x in self.select(context)]", "def get_catalog_search_record_types(self):\n return TypeList([])", "def get_type_lists(frame, rejects=['Id', 'ID','id'],frame_type='spark'):\n\n #Handle spark type data frames\n if frame_type == 'spark':\n nums, cats = [], []\n for key, val in frame.dtypes:\n if key not in rejects:\n if val == 'string' or val == 'boolean':\n cats.append(key)\n else: # ['int','double']\n nums.append(key)\n print('Numeric =', nums)\n print()\n print('Categorical =', cats)\n return nums, cats\n else:\n nums, cats = [], []\n for key, val in frame.types.items():\n if key not in rejects:\n if val == 'enum':\n cats.append(key)\n else:\n nums.append(key)\n\n print('Numeric =', nums)\n print()\n print('Categorical =', cats)\n\n return nums, cats", "def get_comment_search_record_types(self):\n return TypeList([])", "def ntypes(self): # -> list[str]:\n ...", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def getListOfSpeciesTypes(self, *args):\n return _libsbml.Model_getListOfSpeciesTypes(self, *args)", "def get_safety_evaluator(self):\n constraint_names = self._meta['safety_constraints']\n safety_stats = self._stats['safety_stats']\n violations = np.sum(safety_stats['total_violations'], axis=0)\n evaluator_results = collections.OrderedDict([\n (key, violations[idx]) for idx, key in enumerate(constraint_names)\n ])\n return evaluator_results", "def test_ticket_type_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def get_field_occurrences(field_type: int, game_map: Map, **other_conditions: dict) -> list:\n found = []\n for x, column in enumerate(game_map):\n for y, field in enumerate(column):\n if field.get('field') == field_type:\n found.append((x, y))\n if not other_conditions:\n return found\n\n found_with_conditions = []\n for position in found:\n field = game_map[position]\n if len(other_conditions) == len(field.items() & other_conditions.items()):\n found_with_conditions.append(position)\n return found_with_conditions", "async def getEmergencyTypes(self):\n types_list = []\n\n data = await self.director.getItemInfo(self.item_id)\n jsonDictionary = json.loads(data)\n\n if 
jsonDictionary[0][\"capabilities\"][\"has_fire\"]:\n types_list.append(\"Fire\")\n if jsonDictionary[0][\"capabilities\"][\"has_medical\"]:\n types_list.append(\"Medical\")\n if jsonDictionary[0][\"capabilities\"][\"has_panic\"]:\n types_list.append(\"Panic\")\n if jsonDictionary[0][\"capabilities\"][\"has_police\"]:\n types_list.append(\"Police\")\n\n return types_list", "def ListPropertyValuesOfType(res_dict, prop, res_type):\n return [r['properties'][prop] for r in res_dict if r['type'] == res_type]", "def _match_val_type(vals, bounds):\n vals_new = []\n\n for i, bound in enumerate(bounds):\n _type = bound['_type']\n if _type == \"choice\":\n # Find the closest integer in the array, vals_bounds\n # pylint: disable=cell-var-from-loop\n vals_new.append(min(bound['_value'], key=lambda x: abs(x - vals[i])))\n elif _type in ['quniform', 'randint']:\n vals_new.append(np.around(vals[i]))\n else:\n vals_new.append(vals[i])\n\n return vals_new", "def get_predicates(self):\n query = read_query('structure exploration/predicates')\n response = self._submit_query(query)\n\n return [elem['p']['value'].split('/')[-1] for elem in response]", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def get_rule_types(self: object, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/custom-ioa/get-rule-types\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"get_rule_types\",\n keywords=kwargs,\n params=parameters\n )", "def get_all_servers_types():\n ret = _get_list(\n lambda server: server.type if server.type not in ['vanilla.winter', 'vanilla.desert', 'pvp'] else False,\n lambda server: server.type_name\n )\n\n # Extra server type filters\n ret.append({\n 'value': 'pacific+edelweiss',\n 'label': 'RWR: WWII DLCs'\n })\n\n return ret", "def get_sub_query_types():\n sub_query_types = self._config.results_db.get_unique_query_values(\n atomic_fields_and_functions=[\n (t1s.DBA_CAT, t1s.Ta1ResultsSchema().get_complex_function(\n t1s.DBA_TABLENAME, t1s.DBA_CAT, is_list=True))],\n constraint_list=self._config.get_constraint_list() + [\n (t1s.DBF_TABLENAME, t1s.DBF_CAT, cat_string),\n (t1s.DBF_TABLENAME, t1s.DBF_SUBCAT, subcat_string)])\n sub_query_types = list(set(sum(sub_query_types, ())))\n sub_query_types_string = \",\".join(sub_query_types)\n return sub_query_types_string", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_standard_evaluators(self):\n evaluators = collections.OrderedDict(\n offline=None,\n efficiency=None,\n safety=self.get_safety_evaluator(),\n robustness=None,\n discernment=None)\n return evaluators", "def etypes(self) -> Sequence[str]:\n\n return [can_etype[1] for can_etype in self.canonical_etypes]", "def regression_suites(self):\n return [c for c in self.suites.all() if TestSuite.TS_REGRESSION == c.xtype]", "def regression_suites(self):\n return [c for c in self.suites.all() if TestSuite.TS_REGRESSION == c.xtype]", "def doc_types(self):\n return self._extract_set('doc_type')", "def getTypes(self):\n re_list = []\n for index, t in enumerate(self.types):\n if t > 0:\n re_list.append(index + 1)\n return re_list", "def get_types(example_row):\n types = []\n for v in example_row:\n value_type = ctype_text[v.ctype]\n if value_type == 'text':\n 
types.append(text_type)\n elif value_type == 'number':\n types.append(number_type)\n elif value_type == 'xldate':\n types.append(date_type)\n else:\n types.append(text_type)\n return types", "def get_op_types(self):\n return self.cur_config['ops']", "def listEnabledTypes(self):\n actual_type = self.request.get('portal_type', None)\n collage_options = getCollageSiteOptions()\n ttool = getToolByName(self.context, 'portal_types', None)\n if ttool is None:\n return None\n return [\n {\n 'id': pt.getId(),\n 'title': p_(pt.Title()),\n 'selected': pt.getId() == actual_type and 'selected' or None\n }\n for pt in ttool.listTypeInfo()\n if collage_options.enabledAlias(pt.getId())\n ]", "def get_model_evaluations(self):\n return self._model_evaluations", "def get_all_criteria_names(group):\n return (\n criterion.findtext(\"name\")\n for criterion in group.findall(\"criteria/criterion\") if\n criterion.findtext(\"search_type\") != \"member of\")", "def _get_revisions_by_type():\n valid_types = [model.__name__ for model in all_models.all_models]\n revisions_table = all_models.Revision.__table__\n id_query = select([\n func.max(revisions_table.c.id),\n ]).group_by(\n revisions_table.c.resource_type,\n revisions_table.c.resource_id,\n )\n ids = [row for (row,) in db.session.execute(id_query)]\n query = select([\n revisions_table.c.id,\n revisions_table.c.resource_type,\n revisions_table.c.resource_id,\n ]).where(\n revisions_table.c.resource_type.in_(valid_types)\n ).where(\n revisions_table.c.action != \"deleted\"\n ).where(\n revisions_table.c.id.in_(ids)\n ).order_by(\n revisions_table.c.resource_type,\n )\n\n rows_by_type = defaultdict(list)\n for row in db.session.execute(query):\n rows_by_type[row.resource_type].append(row)\n\n return rows_by_type", "def truth_values(formula: Formula, models: Iterable[Model]) -> Iterable[bool]:\n # Task 2.3\n arr = []\n for model in models:\n arr.append(evaluate(formula, model))\n return arr", "def TypeSpecs(self) -> Dict[str, tf.TypeSpec]:\n return self._type_specs", "def find_node_by_op_type(self, op_type: str) -> List[Operator]:\n return list(self.__op_type_list[op_type])", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_texttypes(self):\n texttypes = set()\n for reference in self.references:\n texttypes.update(reference.ref_cell.get_textypes())\n for label in self.labels:\n texttypes.add(label.texttype)\n return texttypes", "def find_ops(optype):\n gd = tf.get_default_graph()\n return [var for var in gd.get_operations() if var.type == optype]", "def type_set(self) -> Set[str]:\n typs = {self.type}\n for s in self.segments:\n typs |= s.type_set()\n return typs" ]
[ "0.67036015", "0.615326", "0.59587413", "0.5896585", "0.5799256", "0.5777026", "0.5747461", "0.5745", "0.5661408", "0.5641381", "0.5631738", "0.551762", "0.551464", "0.5496893", "0.5494026", "0.54865164", "0.54589295", "0.5457875", "0.5433231", "0.54234606", "0.5407233", "0.5399143", "0.53881145", "0.5371538", "0.5361982", "0.53485054", "0.53307325", "0.53098816", "0.5306773", "0.53030264", "0.53030264", "0.5268983", "0.5242798", "0.52377343", "0.52264804", "0.52253985", "0.5220846", "0.5202331", "0.5161947", "0.5150967", "0.5139736", "0.51312625", "0.51133865", "0.5108703", "0.5104739", "0.51041317", "0.50851154", "0.50839263", "0.50814134", "0.50812155", "0.5079961", "0.5074679", "0.50703007", "0.50642467", "0.50642467", "0.50546175", "0.5052617", "0.5046964", "0.5027737", "0.5009724", "0.5006178", "0.50028676", "0.49981827", "0.49850237", "0.49813107", "0.4969319", "0.4966408", "0.49606746", "0.49591026", "0.4949414", "0.4945897", "0.49428666", "0.4937976", "0.49372634", "0.49271062", "0.49264964", "0.49208784", "0.4918888", "0.49088645", "0.4896935", "0.48932207", "0.4887983", "0.4886744", "0.48846096", "0.48846096", "0.48841065", "0.4881436", "0.48798034", "0.48775786", "0.487058", "0.48657903", "0.48562163", "0.48545307", "0.48535421", "0.4849249", "0.484831", "0.48415288", "0.4835694", "0.4831066", "0.48286054" ]
0.76133484
0
Returns the 'details' field of a clustering.
def analysis_function_details(self, clustering):
    return clustering.details
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def details(self) -> \"dict\":\n return self._attrs.get(\"details\")", "def details(self):\n return self._details", "def detail(self):\n info = self.info()\n return info", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def details(self):\n pass", "def detailedInfo(cls):\n return 'tbd'", "def detailedInfo(cls):\n return 'tbd'", "def detail(self):\n return self.status[\"health\"][\"detail\"]", "def get_details(self):", "def getDetailsJSON(self):\n return self.__detailsJSON", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])", "def details(self):\n raise NotImplementedError()", "def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description", "def _get_details(self, details):\n details['DoT'] = \"Yes\" if self.static else \"No\"\n details['device'] = self.device\n details['volume_id'] = self.volume_id\n details['from_snap'] = \"No\" if not self.from_snapshot_id else self.from_snapshot_id\n details['from_archive'] = \"No\" if not self.from_archive else self.from_archive['url']\n details['snapshot_progress'] = self.snapshot_progress\n details['snapshot_status'] = self.snapshot_status\n # TODO: keep track of any errors\n details['err_msg'] = None if details.get('err_msg', '') == '' else details['err_msg']\n details['snapshots_created'] = self.snapshots_created\n return details", "def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )", "def details (self):\n return six.text_type(self)", "def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))", "def details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:\n return pulumi.get(self, \"details\")", "def get_discovery_summary():\n pass", 
"def get_details(disease):\n\treturn d_desc_map[disease]", "def get_details(self):\n raise Exception(\"bad details\")", "def get_details(self):\n return self.__config_data", "def raw_detail_dicts(self):\n # TODO(chris): validate columns using column headers.\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({\n 'instance_id': element.attrib['id'].strip(),\n 'qps': children[0].text.strip(),\n 'latency': children[1].text.strip(),\n 'requests': children[2].text.strip(),\n 'errors': children[3].text.strip(),\n 'age': children[4].text.strip(),\n 'memory': children[5].text.strip()\n })\n return details", "def __repr__(self):\n s = f'sample:\\n{self.sample}\\n'\n s += f'cluster:\\n{self.cluster}\\n'\n s += f'largest_cluster:\\n{self.get_largest_cluster()}'\n return s", "def info(self):\n ss = \"\\nSummary EffectiveArea2D info\\n\"\n ss += \"----------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy, 'energy')\n ss += array_stats_str(self.offset, 'offset')\n ss += array_stats_str(self.eff_area, 'dispersion')\n\n return ss", "def __str__(self):\n return \"Clustering\"", "def get_info(self) -> str:\n return self.info", "def details(self) -> Optional[pulumi.Input['SolutionDetailsArgs']]:\n return pulumi.get(self, \"details\")", "def get_summary(self) -> str:\n connected = self.is_connected()\n info = '[{} - {}]'.format(self._index,\n 'Connected' if connected else 'Disconnected')\n if connected:\n info += ' {} ({})'.format(self.get_model_name(), self.get_serial())\n return info", "def info(self):\n return self._info", "def __repr__(self):\n outs = [\"Cluster Expansion Summary\"]\n outs += repr(self.cluster_subspace).split(\"\\n\")[1:]\n\n if self.regression_data is not None:\n outs += [\n f\"Regression Data : estimator={self.regression_data.estimator_name}\"\n f\" module={self.regression_data.module}\",\n f\" parameters={self.regression_data.parameters}\",\n f\"Target Property : \"\n f\"mean={np.mean(self.regression_data.property_vector):0.4f} \"\n f\"std={np.std(self.regression_data.property_vector):0.4f}\",\n ]\n fit_var = sum(\n self._subspace.function_total_multiplicities[1:] * self.eci[1:] ** 2\n )\n outs += [\n f\"ECI-based Property : mean={self.eci[0]:0.4f} std={np.sqrt(fit_var):0.4f}\"\n ]\n return \"\\n\".join(outs)", "def info(self):\n ss = \"\\nSummary ARF info\\n\"\n ss += \"----------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy_lo, 'Energy lo')\n ss += array_stats_str(self.energy_hi, 'Energy hi')\n ss += array_stats_str(self.effective_area.to('m^2'), 'Effective area')\n ss += 'Safe energy threshold lo: {0:6.3f}\\n'.format(self.energy_thresh_lo)\n ss += 'Safe energy threshold hi: {0:6.3f}\\n'.format(self.energy_thresh_hi)\n\n return ss", "def get_info(self) -> str:\n raise NotImplementedError()", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": 
\"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def __repr__(self):\n\n return \"<Cluster id=%s>\" % (self.id)", "def describe(self):\r\n mdataset_description = {\r\n 'kind': \"HConteiner\",\r\n 'compliance': self._compliance,\r\n 'has_encryption': self.has_encryption,\r\n 'encryption': self._encryption,\r\n 'sensitive': self._sensitive,\r\n 'license': self._license,\r\n }\r\n verbose_event()\r\n return mdataset_description", "def get_display_info(self):\n return self.display_info", "def info(self):\n return self.info_text", "def __str__(self):\n return \"Cluster\"", "def get_details():\r\n return run_operations.get_run_details(experiment_name, job_name).as_dict(key_transformer=camel_case_transformer)", "def mychem_info(self):\n return self._mychem_info", "def 
detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)", "def info(self):\n return self._info", "def info(self):\n ss = \"\\nSummary PSF3D info\\n\"\n ss += \"---------------------\\n\"\n ss += array_stats_str(self.energy_lo, \"energy_lo\")\n ss += array_stats_str(self.energy_hi, \"energy_hi\")\n ss += array_stats_str(self.offset, \"offset\")\n ss += array_stats_str(self.rad_lo, \"rad_lo\")\n ss += array_stats_str(self.rad_hi, \"rad_hi\")\n ss += array_stats_str(self.psf_value, \"psf_value\")\n\n # TODO: should quote containment values also\n\n return ss", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def description(self):\n return self._hdr", "def description(self):\n return self._hdr", "def info(self) -> dict:", "def get_details(self):\n print(self.name)\n print(10 * \"-\" + \"\\n\")\n print(self.description)\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)\n print(\"\\n\")", "def summary(self):\n print('DistanceMatrix (n=%s)' % len(self))\n print('Distance metric = %s' % self.distance_metric.__name__)\n print(self)", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n attr_list = []\n for name in self._metadata:\n attr_list.append(name + \": \" + str(getattr(self, name, None)) + \"\\n\")\n print(f\"{self.__class__}\\n\" + \"\".join(attr_list))", "def info(self):\n # define the return string\n bigstring = ''\n\n # assemble the information on the column\n bigstring += 'Column name: ' + self.colname + '\\n'\n bigstring += 'Column type: ' + str(self._type) + '\\n'\n bigstring += 'Column format: ' + str(self._format) + '\\n'\n bigstring += 'Column null value : ' + str(self._null) + '\\n'\n if self.unit:\n bigstring += 'Column unit : ' + self.unit + '\\n'\n if self.colcomment:\n bigstring += 'Column comment : ' + self.colcomment + '\\n'\n\n # return the result\n return bigstring", "def get_info(self):\n return \"TODO !\"", "def info(self):\r\n return self._get('info', {})", "def product_details(self) -> MqexsProductDetails:\n return self.__product_details", "def details(self, details: \"dict\"):\n self._attrs[\"details\"] = details", "def info(self):\n print 'A= ', self.application\n print 'C= ', self.city\n print 'D= ', self.dataset.shape", "def full_info(self, object, name, value):\n return self.info()", "def info(self):\n print self.id, self.type, self.xyz.get_xyz", "def getInfo(self):\n return self.info", "def get_details_title(mat_dict):\n title = \"# Detail section for {} (COF {}) v{}\".format(mat_dict['name_conventional'], mat_dict['mat_id'],\n mat_dict['workflow_version'])\n return title", "def desc(self):\n kpi_name = 'id:{idx}, resource: {rsc}, group: {grp}, metric: {mtr}'\\\n .format(idx=self.idx,\n rsc=self.resource,\n grp=self.group,\n mtr=self.metric)\n return kpi_name", "def describe(self):\n return str(self)", "def _info(self) -> str:\n return (\n f\"<{self.__class__.__name__}\\n\"\n f\" spark type = 
{self._spark_type_class.__name__}\\n\"\n f\" nullable = {self._is_nullable}\\n\"\n f\" name = {self._resolve_field_name()} <- {[self.__name_explicit, self.__name_contextual]}\\n\"\n f\" parent = {self._parent}\\n\"\n f\" metadata = {self._metadata}\\n\"\n \">\"\n )", "def info(self, name):\n if isinstance(self._state[name], dict):\n attrs = self._state[name]['attrs']\n return '{} {}({}), {} records: {}'.format(\n attrs['type_str'], name, ','.join(attrs['domain']),\n attrs['records'], attrs['description'])\n else:\n return repr(self[name])", "def __repr__(self):\n return self._metadata.__str__()", "def rvdist_info(self):\n return \"\"", "def info(self):\n return {\n \"dimension_x\": self.dimension_x,\n \"dimension_y\": self.dimension_y,\n \"api_level\": self.api_level,\n \"device_model\": self.model,\n }", "def product_details(self):\n return self._product_details", "def get_info(self):\n out = ''\n for k in sorted(self.components.keys()):\n out += '{:s}: {:s}'.format(k, self.info[k]) + '\\n'\n return(out)", "def describe_cluster_creating_response():\n return {\n \"cluster\": {\n \"status\": \"CREATING\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {},\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def info(self):", "def info(self):", "def getInfo(self):\n return self._info", "def contact_details(self):\n return self.data.get(\"contactDetails\")", "def get_description(self):\n return self['host_name']", "def client_details(self):\n return self._client_details", "def get_info(self):\n return None", "def details(self) -> str:\n return f\"- **language**: [{self.language}]\\n\" \\\n f\"- **opengame**: [{self.opengame}]\\n\" \\\n f\"- **system**: [{self.system}]\\n\" \\\n f\"- **mode**: [{self.mode}]\\n\" \\\n f\"- **attributes**: [{self.attributes}]\\n \" \\\n f\"- **score_threshold**: [{self.score_threshold}]\\n \" \\\n f\"- **monsters**: [{self.monsters}]\\n\"", "def printDetails(self):\n print str(self.number) + \": \" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def description(self):\n return self.data[\"attributes\"][\"description\"]", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def get_perfect_information(self):\n raise NotImplementedError", "def summary(self):\n return \"%s: %s\" % (self.attribute.name, self.value_as_text)", "def getInfo():", "def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details", "def get_description(self):", "def description(self):\n return self.visual_desc", "def info(self):\n return self.__dict__[self.sid]", "def disks_and_size_details(self) -> Mapping[str, int]:\n return pulumi.get(self, 
\"disks_and_size_details\")", "def get_info(self):\n pass" ]
[ "0.6636939", "0.6636939", "0.6636939", "0.6543469", "0.64725894", "0.63629097", "0.62274104", "0.6106484", "0.60739094", "0.6035391", "0.6035391", "0.60135996", "0.6000487", "0.5963064", "0.59448", "0.5934605", "0.5902071", "0.5895396", "0.58460885", "0.5807865", "0.57522964", "0.57400453", "0.5730994", "0.5702703", "0.56945956", "0.5692235", "0.56831974", "0.5671745", "0.5614645", "0.5606766", "0.5574266", "0.55707467", "0.5570531", "0.5564634", "0.5560177", "0.5547312", "0.553102", "0.5526748", "0.5517361", "0.55122256", "0.5508997", "0.5508997", "0.5505632", "0.5493975", "0.54908127", "0.5484838", "0.5482763", "0.5476762", "0.54646707", "0.5456636", "0.5451693", "0.5443182", "0.5420189", "0.5418747", "0.5418747", "0.54152805", "0.54118997", "0.54086506", "0.54046136", "0.54046136", "0.53980136", "0.5395534", "0.539425", "0.53867245", "0.538585", "0.5379079", "0.5377226", "0.53640336", "0.5357346", "0.53547925", "0.5341225", "0.5330063", "0.5324583", "0.5321568", "0.53178966", "0.5315936", "0.53077203", "0.5307586", "0.5302969", "0.5301386", "0.52992725", "0.52992725", "0.5298606", "0.52983505", "0.5297835", "0.52977175", "0.5292285", "0.5290902", "0.52884704", "0.527069", "0.5270553", "0.5269551", "0.52572894", "0.52537", "0.52535516", "0.5253419", "0.52526796", "0.5250931", "0.52437073", "0.5236383" ]
0.7595597
0
Returns the number of cluster a clustering has.
def analysis_function_num_clusters(self,clustering): return len(clustering.clusters)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def n_clusters(self):\n return len(self.clusters)", "def n_clusters(self):\n return self.model.n_clusters", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def get_cluster_idx(_cluster):\n\n return _cluster.cluster_idx", "def cluster_counter(self):\n return Counter(self.model.labels_.tolist())", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def node_count(self) -> int:\n return pulumi.get(self, \"node_count\")", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def getNbClusters( model):\r\n\r\n\tlabels = model.labels_\r\n\tlabelValues = []\r\n\tfor label in labels:\r\n\t\tif label not in labelValues and label != -1: labelValues.append(label)\r\n\tnbClusters = len( labelValues)\r\n\treturn nbClusters", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"node_count\")", "def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def number_of_nodes(self):\n return int(self._data['number_of_nodes'])", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def extract_cluster_size(line):\r\n cluster_size = line.split(\":\")[-1]\r\n\r\n try:\r\n cluster_size = int(cluster_size)\r\n except ValueError:\r\n return 0\r\n return cluster_size", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def node_count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"node_count\")", "def get_num_nodes(self):\n\n return sum(self.topology)", "def get_count_all(cls, context, cluster_id):\n return cls.dbapi.get_cluster_nodegroup_count(context, cluster_id)", "def clustering_factor(self):\n return self.unpack_dword(0x2C)", "def num_nodes(self) -> int:\n return pulumi.get(self, \"num_nodes\")", "def _num_nodes(self):\n return int(self._node_map[-1])", "def node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"node_count\")", "def get_cluster_indices(self,dataset, cluster_number):\n\t\tself.__init__(dataset, self.k)\n\t\tself.e_step() #got responsibilities\n\t\tmax_cluster = np.argmax(self.w, axis = 1)\n\t\tindices = []\n\t\tfor i in range(dataset.shape[0]):\n\t\t\tif 
max_cluster[i] == cluster_number:\n\t\t\t\tindices.append(i)\n\t\treturn indices", "def _choose_clusters_num(database_type: str, synthetic_data_dim: int) -> int:\n data_dim: int = 1\n if database_type == DatabaseType.Synthetic:\n data_dim = synthetic_data_dim\n elif database_type in [DatabaseType.ThreeDRoadNetwork, DatabaseType.IndividualHouseholdElectricPowerConsumption]:\n data_dim = 2\n elif database_type == DatabaseType.HouseSalesInKingCounty:\n data_dim = 8\n return 2 * (data_dim + 1) ** 2 + 2", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def node_count(self) -> int:\n return int(self.graph_tuple_stats.node_count or 0)", "def cluster_id(self):\n return self._cluster_id", "def cluster(self):\n return self._cluster", "def cluster(self):\n return self._cluster", "def node_count(self):\n return self._node_count", "def get_number_of_clusters(df, use_pca, n_components):\n n_clusters = 10\n cluster_with_distances = []\n for i in range(n_clusters):\n pipe = _build_model(df, use_pca, n_components, use_kmeans=True, n_clusters=i + 1)\n cluster_with_distances.append(pipe.named_steps['kmeans'].inertia_)\n plt.figure(6, figsize=(12, 6))\n plt.plot(range(1, 11), cluster_with_distances, 'o')\n plt.plot(range(1, 11), cluster_with_distances, '-', alpha=0.5)\n plt.title('The Elbow Criterion')\n plt.xlabel('number of cluster')\n plt.ylabel('Sum of squared distances of samples to their closest cluster center')\n plt.show()", "def number_of_nodes(self) -> int:\n return self.graph.number_of_nodes()", "def test_count_reads_per_cluster(self):\n \n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n total_reads, reads_per_cluster = count_reads_per_cluster(bedtool, None)\n \n self.assertListEqual([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36], reads_per_cluster)\n self.assertEqual(sum([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36]), total_reads)", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def get_sectors_per_cluster(self):\n\n\t\tsectors_per_cluster_base = struct.unpack('B', self.boot_sector_data[13 : 14])[0]\n\t\tif sectors_per_cluster_base == 0:\n\t\t\traise BootSectorException('Invalid cluster size (zero)')\n\n\t\tif sectors_per_cluster_base <= 0x80: # Although 0x80 is a signed value, it's used as an unsigned one.\n\t\t\tsectors_per_cluster_real = sectors_per_cluster_base\n\t\telse:\n\t\t\tsectors_per_cluster_base = struct.unpack('b', self.boot_sector_data[13 : 14])[0] # Read this again as a signed value.\n\t\t\tsectors_per_cluster_real = 1 << abs(sectors_per_cluster_base)\n\n\t\treturn sectors_per_cluster_real", "def getNumberOfNeighbors(self, vertexNumber): \n\n return self.__degreeCount[vertexNumber]", "def node_count(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"node_count\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def num_nodes(self):\n return len(self.successors)", "def get_cluster_id(self):\n cmd = \"svcinfo lscluster -delim :\"\n\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n return None\n\n header 
= output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_CLUSTER_ID)\n cluster_id = values[index]\n return cluster_id", "def carn_count(self):\n return len(self.carnivores)", "def get_neighbors_count(self, atom):\n return self._graph.get_connected_vertices_count(atom)", "def get_cluster_status(boto3_client, cluster_identifier):\n return boto3_client.describe_clusters(\n ClusterIdentifier=cluster_identifier\n )", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")", "def num_nodes(self) -> Optional[int]:\n return pulumi.get(self, \"num_nodes\")", "def current_node_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"current_node_count\")", "def get_num_nodes(self):\n return len(self._nodes)", "def get_num_nodes(self):\n return len(self._nodes)", "def num_of_node(self):\n \n try:\n return self.num_node\n except:\n print(\"ERROR: No graph exists\")", "def num_shards(self) -> int:\n return self.db_nodes", "def add_new_cluster(self):\n self.result.append(Cluster.Cluster())\n return len(self.result) - 1", "def get_clusters(self):\r\n\r\n return self.__clusters", "def analysis_function_num_clusters_to_percent(self,clustering,percent):\n return clustering.number_of_clusters_to_get_percent(percent)", "def numa_nodes(self):\n return int(self.num_numa_nodes) # type: ignore", "def node_count(self):\n return self._root.count()", "def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i", "def num_nodes(self):\n return self._grid", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def gke_cluster(self) -> Optional['outputs.MembershipEndpointGkeCluster']:\n return pulumi.get(self, \"gke_cluster\")", "def finding_the_number_of_clusters(self, HC_tree_terminal_cosine, Dist_tree_terminal_cosine, which_one):\n max_clusters = min(11, len(self.sequences))\n ress_sil = []\n for i in tqdm(range(2, max_clusters), desc=f\"Finding the best number of clusters ({which_one})\", disable=1-self.verbose):\n assignment_tree_terminal_cosine = cluster.hierarchy.cut_tree(HC_tree_terminal_cosine,i).ravel() #.ravel makes it 1D array.\n ress_sil.append((silhouette_score(squareform(Dist_tree_terminal_cosine),\n assignment_tree_terminal_cosine,metric='cosine').round(3)*1000)/1000)\n if which_one == 'DT':\n self.C_DT = ress_sil.index(max(ress_sil)) + 2\n elif which_one == 'RF':\n self.C_RF = ress_sil.index(max(ress_sil)) + 2\n elif which_one == 'DT_position':\n self.C_DT_p = 
ress_sil.index(max(ress_sil)) + 2\n elif which_one == 'RF_position':\n self.C_RF_p = ress_sil.index(max(ress_sil)) + 2", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def get_clust_cent(self):\r\n\r\n return self.__clust_cent", "def num_nodes(self, ntype: str = None) -> int:\n if ntype:\n return self.num_nodes_dict[ntype]\n else:\n return self.total_number_of_nodes", "def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph", "def get_num_arcs(self):\n num_arcs = 0\n for node in self._nodes.values(): num_arcs += node._deg\n return (num_arcs / 2) + 1", "def get_num_arcs(self):\n num_arcs = 0\n for node in self._nodes.values(): num_arcs += node._deg\n return (num_arcs / 2) + 1", "def NodesCount(self):\n return len(self.nodes)", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def count(self):\n\t\treturn len(list(self.nodes))", "def _num_nodes(self):\n return len(self._nid2partid)", "def cluster_size(result, var):\n df=calculate_cluster_size(result, var)\n df['cus']=df.index\n return df", "def num_node_groups(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"num_node_groups\")", "def get_n_workers(self):\n return self.df.worker.nunique()", "def get_ncores(self):\n return self._ncores", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def degree(self) -> int:\n return len(self.neighbours)", "def degree(self) -> int:\n return len(self.neighbours)", "def degree(self) -> int:\n return len(self.neighbours)", "def get_cluster_id(options):\n cluster = options.cluster\n datacenter = get_datacenter(options)\n for item in datacenter.hostFolder.childEntity:\n if (item.name == cluster):\n return item._GetMoId()", "def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number", "def get_cluster_indices(dataset, cluster_number, GMM_model):\n\tGMM_model.data = dataset.copy()\n\tGMM_model.m, GMM_model.n = dataset.shape\n\tGMM_model.w = np.asmatrix(np.empty((GMM_model.m, GMM_model.k), dtype=float))\n\tGMM_model.e_step() #got responsibilities\n\tmax_cluster = np.argmax(GMM_model.w, axis = 1)\n\tindices = []\n\tfor i in range(dataset.shape[0]):\n\t\tif max_cluster[i] == 
cluster_number:\n\t\t\tindices.append(i)\n\treturn indices", "def num_slaves(self) -> int:\n raise NotImplementedError", "def num_neighbors(self):\n return self._num_neighbors" ]
[ "0.8762908", "0.84918106", "0.80784905", "0.77352", "0.71900326", "0.7170602", "0.7130262", "0.70022637", "0.6933635", "0.68955094", "0.6862841", "0.6851633", "0.68233424", "0.68233424", "0.6755365", "0.6752718", "0.66931707", "0.66912186", "0.6680638", "0.66151196", "0.66151196", "0.6610706", "0.65861267", "0.6539488", "0.6513963", "0.649287", "0.6439858", "0.6420169", "0.6417206", "0.64149934", "0.6413359", "0.64035124", "0.64019495", "0.64019495", "0.6382798", "0.6356664", "0.6350437", "0.6315022", "0.62987363", "0.6288998", "0.6269595", "0.6236746", "0.6226667", "0.621742", "0.621742", "0.621742", "0.621742", "0.621742", "0.6215246", "0.6206762", "0.61992633", "0.61982834", "0.61785", "0.6176574", "0.6176574", "0.6176574", "0.6176574", "0.61754096", "0.61500186", "0.61457163", "0.6131266", "0.6126058", "0.6126058", "0.6125346", "0.61201376", "0.61135364", "0.61032087", "0.6099808", "0.6083013", "0.60783225", "0.6071571", "0.60682833", "0.6066472", "0.6062612", "0.60620993", "0.60593003", "0.6055207", "0.60528344", "0.60498375", "0.60378444", "0.60370386", "0.6021506", "0.6021506", "0.6019029", "0.60128736", "0.60126686", "0.6000916", "0.5997344", "0.5995471", "0.599225", "0.59873134", "0.59751415", "0.59690964", "0.59690964", "0.59690964", "0.59677", "0.59660614", "0.5963094", "0.5960791", "0.59604096" ]
0.81173706
2
Returns the number of elements that are clusterized in this clustering (which may not be the total number of elements of the dataset if there were noisy elements)
def analysis_function_total_elements(self,clustering): return clustering.total_number_of_elements
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_clusters(self):\n return len(self.clusters)", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def num_elements(self):\n return self.subset.num_elements()", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def n_clusters(self):\n return self.model.n_clusters", "def nsites(self) -> int:\n return len(self.A)", "def valency(self):\n return len(self.neighbors())", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def elements_count(self):\n return self.__elements_count", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def count(self):\n\t\treturn len(list(self.nodes))", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def __len__(self):\n return len(self.centroid_vector)", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def n_good_features_(self):\n return np.sum(self.important_features_)", "def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem", "def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])", "def count(self):\n return len(self._elements)", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def size(self):\n\t\treturn len(self.nodes)", "def element_count(self):\n return self._internal.get_element_count()", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def size(self):\n try:\n return len(self._adjacency_list)\n except Exception as error:\n print(f'An error occurred: {error}')", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def count(self):\r\n return self.data_array.size", "def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count", "def N(self):\n return len(self.cavity_grid.cavities) + 1", "def noOfElem(classObj, index):\r\n return len(classObj.dataSet[:, index])", "def __len__(self):\n return np.size(self.A,0)", "def get_nodes_pixel_count(self):\n sum_count = self.pixel_count\n for i in range(8):\n node = self.children[i]\n if node:\n sum_count 
+= node.pixel_count\n return sum_count", "def getNumElements(self):\n return 0", "def __len__(self) -> float:\n return len(self.elements)", "def get_num_nodes(self):\n\n return sum(self.topology)", "def getNbClusters( model):\r\n\r\n\tlabels = model.labels_\r\n\tlabelValues = []\r\n\tfor label in labels:\r\n\t\tif label not in labelValues and label != -1: labelValues.append(label)\r\n\tnbClusters = len( labelValues)\r\n\treturn nbClusters", "def getNumElements(self):\n return 1", "def number_of_electrodes(self):\n return self._pre_kernel.shape[1]", "def n_thres(self):\n return np.size(self.thres)", "def get_num_instances(im, non_building_labels):\n return np.setdiff1d(im, non_building_labels)", "def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)", "def num_complementary_regions(self):\n g = self._get_puncturefinder_graph()\n # return g.connected_components_number()\n return nx.number_connected_components(g)", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def n_cs(self):\n return np.size(self._cs, 0)", "def get_num_features(self):\r\n \r\n return len(self[0]['x'])", "def Nnodes(self):\n return len(self.nodes)", "def size(self):\n return len(self._adjacency_list)", "def nspatials(self):\n return int(len(self)/2)", "def size(self):\n return len(self.edges())", "def size(self):\n return sum(elem.size for elem in self)", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def num_actual_nodes(tree):\n return (tree.n_node_samples > 0).sum()", "def getNumElements(self):\n return 1", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def NodesCount(self):\n return len(self.nodes)", "def __len__(self) -> int:\n return len(self.nodes)", "def cluster_counter(self):\n return Counter(self.model.labels_.tolist())", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def __len__(self):\n\n if self.is_finite_set:\n size = 0\n for set in self.sets:\n size += len(set)\n return size\n else:\n raise ValueError(\"'%s' is not a finite set.\" % self)", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def _num_nodes(self):\n return len(self._nid2partid)", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def __len__(self) -> int:\r\n return len(self._nodes)", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def get_node_count(self) -> Iterable:\n return len([i for i in self.all_nodes_as_iterable()])", "def element_count(self):\n return len(self.elements) + len(self.virtual_elements)", "def num_nodes(self):\n return len(self.successors)", "def 
num_nodes(self):\n return len(self.nodes)", "def size(self):\r\n return len(self._train_datas)", "def essential_node_count(self) -> int:\n return sum(\n 1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS\n )", "def size(self):\n return len(self._adjacency_list.keys())", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def interval_cardinality(self):\n return len(list(self.lower_contained_intervals()))", "def size(self):\n\n return len(self._adjacency_list)", "def get_number_of_atoms_to_optimize(self):\n v = self.c.get(simulation_cell=True)\n return len(v.data.stoichiometry)", "def data_size(self) -> int:\n return len(self.__labels)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n\n return len(self.labels)", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def _N(self):\n return len(self._array)", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def __len__(self):\n return self.count_of(CUBA.NODE)", "def countEdges(self):\n return numpy.count_nonzero(self.supportArray) / 2", "def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))", "def __len__(self) -> int:\n return len(self.__elements)", "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():" ]
[ "0.7475261", "0.73660713", "0.7281601", "0.7209829", "0.7184535", "0.71797055", "0.71275187", "0.7005667", "0.6999668", "0.69551873", "0.68624055", "0.68511283", "0.67983764", "0.6792057", "0.6780994", "0.6774845", "0.6771099", "0.66886616", "0.66632557", "0.66320395", "0.66132414", "0.6564767", "0.6555667", "0.6509118", "0.6487408", "0.64816576", "0.64807916", "0.6468132", "0.64678425", "0.64630216", "0.64625275", "0.6456934", "0.6443169", "0.6433817", "0.64320695", "0.6430267", "0.6425387", "0.64124244", "0.64114904", "0.64039147", "0.6394186", "0.6386561", "0.6381392", "0.6377842", "0.63628834", "0.63565457", "0.635478", "0.6352624", "0.6344664", "0.6340512", "0.63376904", "0.6329972", "0.6325249", "0.6319735", "0.6308754", "0.63025564", "0.6300754", "0.62910634", "0.629054", "0.62886375", "0.62876904", "0.62873334", "0.62842995", "0.6280591", "0.62802994", "0.62797505", "0.6278884", "0.62764573", "0.62726575", "0.62720245", "0.6270812", "0.6267656", "0.6264363", "0.62582725", "0.62497246", "0.6246999", "0.6246279", "0.6238815", "0.62386554", "0.6237936", "0.62348354", "0.6232878", "0.6229474", "0.6227602", "0.6226656", "0.622173", "0.62187964", "0.620858", "0.62078416", "0.620067", "0.620067", "0.61803454", "0.6176319", "0.6169932", "0.61662686", "0.61651", "0.6163326", "0.61632836", "0.61628973", "0.6160111" ]
0.74841356
0
Returns the percentage of elements of the clustering that are in the 4 bigger clusters.
def analysis_function_top_4(self,clustering): clustering.sort_clusters_by_size() total = 0 percents = clustering.get_population_percent_of_n_bigger_clusters(4) for p in percents: total = total+p return total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def analysis_function_num_clusters_to_percent(self,clustering,percent):\n return clustering.number_of_clusters_to_get_percent(percent)", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i", "def compute_cluster_class_fractions(k_means_model, y):\n\n n_classes = y.shape[1]\n class_labels = utils.one_hot_to_index(y)\n cluster_labels = k_means_model.labels_\n\n class_clustroid_counts = 
np.zeros((n_classes, K))\n for i in range(len(class_labels)):\n class_clustroid_counts[class_labels[i], cluster_labels[i]] += 1\n\n class_clustroid_fractions = class_clustroid_counts / np.sum(class_clustroid_counts, axis=1).reshape(n_classes, 1)\n\n print(\"\\n---- Class Clustroid Distribution ----\")\n for i in range(n_classes):\n print(\"Class {}: {}\".format(i, class_clustroid_fractions[i, :]))", "def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def get_susceptibility(clusters):\n\n # If there is no or only one cluster then there is no finite cluster\n if len(clusters) <= 1:\n return np.nan\n\n # Remove largest, i.e. infinite, cluster\n clusters.remove(max(clusters))\n\n sizes = np.array(list(set(clusters)))\n n_s = []\n\n for size in sizes:\n n_s.append(clusters.count(size))\n\n temp = sizes * n_s\n S = np.sum(sizes * temp) / np.sum(temp)\n\n return S", "def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists", "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))", "def evaluateClusters( features, labels):\r\n\r\n\treturn silhouette_score( features, 
labels)", "def compute_total_bipartiteness(hypergraph, clusters):\n bipartiteness_sum = 0\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n bipartiteness_sum += hypergraph_bipartiteness(hypergraph, clusters[i], clusters[j])\n return bipartiteness_sum", "def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))", "def get_optimal_clusters(cell,threshold=140):\n\n\t#\tTurn image to numpy array\n\tpic = image_to_matrix(cell)\n\n\t#\tGet the array of coordinates of dark dots\n\tdots = get_threshold_dots(pic,threshold)\n\n\tscores = []\n\n\tfor n_clusters in range(1,10):\n\t\tclusters = kmeans.kmeans(pic,pic.shape[0],pic.shape[1],50,n_clusters,threshold)\n\t\tprint clusters\n\n\t\tsquare_sum_array = [0]*n_clusters\n\t\tcount_array = [0]*n_clusters\n\n\t\tfor dot in dots:\n\t\t\tdistance_array = [kmeans.euclid_distance(dot,cluster) for cluster in clusters]\n\t\t\tmin_index = distance_array.index(min(distance_array))\n\t\t\tsquare_sum_array[min_index] += kmeans.euclid_distance(clusters[min_index],dot)\n\t\t\tcount_array[min_index] += 1\n\n\t\tvariances = [square_sum/(count+0.001) for square_sum, count in zip(square_sum_array,count_array)]\n\n\t\tprint variances\n\t\tscores.append(sum(variances)/len(variances))\n\n\treturn scores", "def diversion_score(X, offspring_list):\r\n similarity_sum = 0\r\n if len(offspring_list[0]) == 2:\r\n offspring_list = [(parent_a, offspring, parent_a) for (parent_a, offspring) in offspring_list]\r\n for (parent_a, offspring, parent_b) in offspring_list:\r\n similarity_sum += max(icc(parent_a, offspring), icc(parent_b, offspring))\r\n return (1 - (((similarity_sum / len(offspring_list)) + 1) / 2)) * 100 # move from [-1,1] to [0,2], then to [0,1], then inverse, finally move to [0,100]\r", "def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio", "def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def analysis_function_top_percent(self,clustering):\n clustering.sort_clusters_by_size()\n return clustering.get_population_percent_of_cluster(0)", "def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def get_cluster_to_split(clusters):\n\treturn max(clusters.items(), key=lambda x: x[1].get_distortion())[1]", "def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)", "def 
contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def cluster_mcc_ratio(result, cluster_names, var, n=5):\n rel1=get_cluster_country_distr(result, var)\n clusters=calculate_cluster_size(result, var)\n hours=hours_tusc(result, var)\n res=\"\"\n for i in zip(clusters.index, cluster_names[:len(clusters)]):\n res=res+f\"By the number of unique visitors the {i[1]} cluster's top 5 countries are; \"\n rel=rel1.sort_values(i[0],ascending=False)[:n]\n for j in range(0,5):\n if j!=n-1:\n res=res+f'{rel[i[0]].index[j]} ({rel[i[0]][j]}%), '\n else:\n res=res+f'and {rel[i[0]].index[j]} ({rel[i[0]][j]}%). '\n res=res+f'This cluster spends on average {int(hours.hrs_in_tusc[i[0]])} days in Tuscany, '\n res=res+get_places_at_least4_hours(result, i[0], var)\n res=res+ cluster_airport_result(result, i[0], var)\n return res", "def compute_clusters(self, p: float):\n pass", "def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)", "def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def get_modularity(adjacency, clusters):\n total_weight = np.sum(adjacency)\n e = get_clusters_adjacencies(adjacency, clusters)\n e = e / total_weight\n a = np.sum(e, axis=1)\n return np.sum(e.diagonal() - np.power(a, 2))", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def get_modularity3(adjacency, clusters):\n\n rows, cols = adjacency.shape\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n degrees = np.sum(adjacency, axis=1)\n total_weight = np.sum(adjacency)\n sum = 0\n for i in range(rows):\n for j in range(cols):\n if id_to_cluster[i] == id_to_cluster[j]:\n sum += adjacency[i, j] - (degrees[i] * degrees[j]) / total_weight\n sum = sum / total_weight\n return sum", "def compute_variance(\n data, cluster_labels, centroids, alpha=10, debug=False, num_class=None\n):\n\n k = len(centroids) if num_class is None else num_class\n phis = torch.zeros(k)\n for c in range(k):\n cluster_points = data[cluster_labels == c]\n c_len = len(cluster_points)\n if c_len == 0:\n phis[c] = -1\n elif c_len == 1:\n phis[c] = 0.05\n else:\n phis[c] = torch.sum(torch.norm(cluster_points - centroids[c], dim=1)) / 
(\n c_len * np.log(c_len + alpha)\n )\n if phis[c] < 0.05:\n phis[c] = 0.05\n\n if debug:\n print(\"size-phi:\", end=\" \")\n for i in range(k):\n size = (cluster_labels == i).sum().item()\n print(f\"{size}[phi={phis[i].item():.3f}]\", end=\", \")\n print(\"\\n\")\n\n return phis", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. 
\n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)", "def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 
0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def d50(clones, num_Reads): \n\n\n d50_amount = num_Reads/2\n read_count=0\n for i in clones:\n read_count+=clones[i].num_reads\n if read_count>=d50_amount:\n return i/float(len(clones))", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def calculate_enrichment_factor_optimal(molecules, ranked_dataset_percentage_cutoff, pic50_cutoff):\n\n ratio = sum(molecules[\"pIC50\"] >= pic50_cutoff) / len(molecules) * 100\n if ranked_dataset_percentage_cutoff <= ratio:\n enrichment_factor_optimal = round(100 / ratio * ranked_dataset_percentage_cutoff, 1)\n else:\n enrichment_factor_optimal = 100.0\n return enrichment_factor_optimal", "def leaf_nodes_to_search_percent(self) -> Optional[int]:\n return pulumi.get(self, \"leaf_nodes_to_search_percent\")", "def _calculate_cluster_probs(self, dist_mat, temperature):\n dist_mat = np.square(dist_mat) # euclidean distance -> squared euclidean distance\n n_samples = dist_mat.shape[0]\n n_clusters = dist_mat.shape[1]\n cluster_probs = np.zeros(shape=(n_samples, n_clusters))\n for i in range(n_clusters):\n for j in range(n_samples):\n normalizer = sum(self.marginal_probs[k] * np.exp(-dist_mat[j, k] / temperature) for k in range(n_clusters))\n cluster_probs[j, i] = self.marginal_probs[i] * np.exp(-dist_mat[j, i] / temperature) / normalizer\n return cluster_probs", "def berger_parker_d(counts):\n return counts.max()/float(counts.sum())", "def solidity(cnt):\n\tarea = cv2.contourArea(cnt)\n\thull = cv2.convexHull(cnt)\n\thull_area = cv2.contourArea(hull)\n\treturn float(area) / hull_area", "def find_avg(centroids, short_cut=False, sim_scores=None):\n \n total_sim = 0.0\n total_comparisons = 0\n \n if short_cut:\n total_comparisons = len(sim_scores)\n \n for score in sim_scores:\n total_sim += score\n \n return (total_sim / total_comparisons)\n\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n total_sim += similarity(centroids[i], centroids[j])\n total_comparisons += 1\n\n return (total_sim / total_comparisons)", "def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - 
self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def calculate_cluster_silhouette(self, cluster):\n cluster_size = len(cluster.get_samples())\n return self.sum_silhouette(cluster) / cluster_size", "def clustering_coefficient(graph):\r\n count = 0\r\n sumOfClusteringCoefficients = 0\r\n for vertex in graph:\r\n count += 1\r\n sumOfClusteringCoefficients += local_clustering_coefficient(graph, vertex)\r\n return sumOfClusteringCoefficients / count", "def _cluster_hitprobability(self, x, y):\n hm_count = np.zeros_like(y).astype(float)\n hm = np.zeros_like(y).astype(float)\n #skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n\n ind = self._cluster(x, 35)\n\n for cluster_id in np.unique(ind):\n test = np.argwhere(ind == cluster_id)[:, 0]\n train = np.argwhere(ind != cluster_id)[:, 0]\n #print test\n self.basemodel.fit(x[train, :], y[train], hyperparams_optim=False)\n hm_count[test] += 1.\n hm[test] += (self.basemodel.predict(x[test, :]) == y[test]).astype(float)\n\n proba = hm / hm_count\n if self.verbose:\n # print('H/M count:')\n # print(hm_count)\n print('Proba:')\n print(proba)\n self.basemodel.fit(x, y, hyperparams_optim=False)\n return proba", "def clustering_factor(self):\n return self.unpack_dword(0x2C)", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def estimate_label_proportion(source_loader,target_loader,feat_extract,cuda,n_clusters,cluster_param): \n feat_extract.eval()\n #n_clusters = 3\n from sklearn.cluster import AgglomerativeClustering\n \n \n X_s,y_s = extract_feature(source_loader,feat_extract,cuda) \n X_t,y_t = extract_feature(target_loader,feat_extract,cuda) \n \n \n \n cluster = AgglomerativeClustering(n_clusters=n_clusters,linkage=cluster_param)\n label_t = cluster.fit_predict(X_t)\n #print(np.unique(label_t))\n mean_mat_S, num_in_class_S = extract_prototypes(X_s,y_s,n_clusters)\n mean_mat_T, num_in_class_T = extract_prototypes(X_t,label_t,n_clusters)\n \n \"\"\"\n We assume that prototypes of classes have been transported in some in the feature\n space \n \"\"\"\n \n import ot\n M = ot.dist(mean_mat_S, mean_mat_T)\n M /= M.max()\n \n n_1 = n_clusters\n a = np.ones((n_1,)) / n_1\n b = np.ones((n_1,)) / n_1\n \n \n gamma = ot.emd(a,b,M)\n nb_sample_S = [ np.sum(y_s==i) for i in range(n_clusters) ]\n proportion_T = num_in_class_T/np.sum(num_in_class_T)\n assignement_source_to_target = gamma.argmax(axis=1)\n \n # proportions are arranged directly per class\n proportion_T = proportion_T[assignement_source_to_target]\n print(proportion_T,assignement_source_to_target)\n \n\n return proportion_T,nb_sample_S, assignement_source_to_target", "def heuristic_3_partition(game, player) -> float:\n\n partition_possible_factor = get_partition_possible_factor(game, player)\n\n return float(partition_possible_factor)", "def fitness(individual, n_clusters=3, n_seeds=5):\n\n dataframe = common.scale_dataframe(individual)\n corr = abs(individual.dataframe.corr().iloc[0, 1])\n differences = []\n for seed in range(n_seeds):\n km = KMeans(n_clusters, random_state=seed).fit(dataframe)\n differences.append(silhouette_score(dataframe, km.labels_) - corr)\n\n best = max(differences)\n return best", "def percentage_hapaxes(corpus_parts, corpus):\n percentage_h = []\n count = 0\n dv = divide_corpus(corpus, 10)\n hapax_parts = 
hapaxes_parts(corpus_parts)\n for x in hapax_parts:\n percentage_h.append(percentage(x, len(dv[count])))\n count += 1\n return percentage_h", "def compute_clusters(self, documents):\n ###TODO\n for d in range(0, len(documents)):\n maxi = 999999999\n for cid in range(0, len(self.means)):\n dist = self.distance(documents[d], self.means[cid], self.norms[cid])\n if dist < maxi:\n maxi = dist\n clust = cid \n self.cluster[d] = clust", "def __bayesian_information_criterion(data, clusters, centers):\n\n scores = [0.0] * len(clusters) # splitting criterion\n dimension = len(data[0])\n\n # estimation of the noise variance in the data set\n sigma = 0.0\n K = len(clusters)\n N = 0.0\n\n for index_cluster in range(0, len(clusters)):\n for index_object in clusters[index_cluster]:\n sigma += (euclidean_distance(data[index_object], centers[index_cluster])) # It works\n\n N += len(clusters[index_cluster])\n\n if (N - K != 0):\n sigma /= (N - K)\n\n # splitting criterion\n for index_cluster in range(0, len(clusters)):\n n = len(clusters[index_cluster])\n\n if (sigma > 0.0):\n scores[index_cluster] = n * math.log(n) - n * math.log(N) - n * math.log(\n 2.0 * np.pi) / 2.0 - n * dimension * math.log(sigma) / 2.0 - (n - K) / 2.0\n\n return sum(scores)", "def calc_sw(X, cluster_labels):\n\n labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Sw = []\n for label in labels_set:\n # Loop through each cluster and calculate within cluster distance\n pairs = np.where(labels == label)\n pairs_distance = pdist(X[pairs[0]])\n within_cluster_distance = np.sum(pairs_distance, axis=0)\n Sw.append(within_cluster_distance)\n\n return np.sum(Sw)", "def compute_distortion(cluster_list, data_table):\n\tdistortion = 0\n\tfor cluster in cluster_list:\n\t\tdistortion += cluster.cluster_error(data_table)\n\treturn distortion", "def pie_chart_score(self, grouped):\n picked_scenario = self.scenario_dict[\"%d\" % (self.scenario_num-1)]\n distinct_enum_X = self.data_dict[picked_scenario[\"X\"]]['distinct_enum']\n score = 0\n if min(grouped) < 0:\n score = 0\n elif distinct_enum_X == 1:\n score = 0\n elif picked_scenario[\"Agg_func_Y\"] == \"avg\":\n score = 0\n elif distinct_enum_X >= 2 and distinct_enum_X <= 8:\n score += self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]]) / 8\n elif distinct_enum_X > 8:\n score += 4 * (self.calculate_entropy(self.data_dict[picked_scenario[\"Y\"]])) / distinct_enum_X\n if score > 3:\n score = 3\n return score", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def hausd95(result, reference, voxelspacing=None, connectivity=1):\n hd1 = __surface_distances(result, reference, voxelspacing, connectivity)\n hd2 = __surface_distances(reference, result, voxelspacing, connectivity)\n hd95 = np.percentile(np.hstack((hd1, hd2)), 95)\n return hd95", "def score(self, dataset: List[Read]) -> int:\n kmers_in_common = 0\n for read in dataset:\n for kmer in read.kmers(self.k):\n if self.filter.contains(kmer):\n kmers_in_common += 1\n return self.filter.num_items_in_filter - kmers_in_common", "def _fraction_latency(self, users_distances):\n\n 
users_desired_latency = np.array(list(map(lambda a: self.services_desired_latency[a],\n self.users_services)))\n check = users_distances < users_desired_latency\n fraction = np.count_nonzero(check==True) / self.num_of_users\n return fraction", "def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef", "def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)", "def __cluster_simi(self, i, j):\n sum_ = 0.\n for si in self.__indexclusters[i]:\n for sj in self.__indexclusters[j]:\n simi = self.__sample_simi(si, sj)\n sum_ += simi\n return sum_ / (len(self.__indexclusters[i]) * len(self.__indexclusters[j]))", "def occupation_distribution(data):", "def one_dimension_val_clutering(vals, max_distance=5):\n vals = sorted(vals)\n clusters = []\n for (idx, i) in enumerate(vals):\n cluster = [j for j in vals if abs(j - i) < max_distance]\n clusters.append(cluster)\n clusters = sorted(clusters, key=len, reverse=True)\n cluster = clusters[0]\n if len(cluster) / len(vals) > 0.6 or len(cluster) >= 3:\n return cluster\n else:\n return []", "def check_correctness_statistics(classifier_out, mode, image_type):\n labels = image_type.image_data[mode].labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n return (num_correct / total) * 100", "def compute_distortion(cluster_list, data_table):\r\n distortion = 0\r\n \r\n for cluster in cluster_list:\r\n distortion += cluster.cluster_error(data_table)\r\n\r\n return distortion", "def get_proportion_of_unique_lemmas(self):\n lemmas = self.blob.words.lemmatize()\n return len(set(lemmas)) / float(len(self.blob.words))", "def FindScale(self):\n\n ## 6 and from the cv code the distance is 6 then we are good\n print(\"TODO: Very hard\")", "def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def variance_ratio_criterion(cls, cluster_labels, pca_reduced, cluster_size, names: list):\n score = metrics.calinski_harabaz_score(pca_reduced, cluster_labels)\n return ClusterMetricScore('calinski harabaz', score, cluster_size, cluster_labels, names)", "def getBeliefDistribution(self):\n # This essentially gives a point to a location for each particle there, then \n # normalizes the point values so they add up to 1.\n dist = util.Counter()\n for part in self.particles: dist[part] += 1\n dist.normalize()\n return dist", "def good_clusters(block_path, cutoff=CLUSTER_ACCURACY_CUTOFF):\n accuracies, cluster_accuracies = morphs.load.cluster_accuracies()\n block_cluster_accuracies = cluster_accuracies[block_path]\n return block_cluster_accuracies[\n block_cluster_accuracies.accuracy > cutoff\n ].index.values", "def calc_verts_median(verts):\n return ft.reduce(operator.add, [v.co for v in verts]) / len(verts)", "def overlap_score(labels, labels_pred):\n raw_overlap = 1-fraction_mislabeled_nodes(labels, labels_pred)\n partition_true = np.array(labels).astype(int)\n 
partition_pred = np.array(labels_pred).astype(int)\n num_nodes = partition_pred.size\n num_groups = partition_true.max() + 1\n\n chance_level = 0.\n for i in range(num_groups):\n temp = np.sum(i == partition_true) / num_nodes\n if temp > chance_level:\n chance_level = temp\n\n score = (raw_overlap - chance_level) / (1 - chance_level)\n if score <= 0:\n score = 0\n\n return score", "def get_modularity_other_c(A, cluster_indices):\n # define the number of nodes in the graph and the number of clusters\n n = len(cluster_indices)\n nclusters = max(cluster_indices) + 1\n # define the row sums of the adjacency matrix\n row_sums = [sum(row) for row in A]\n # define one half of the sum of all entries in the adjacency matrix\n m = sum(row_sums) / 2.0\n # define the modularity\n Q = 0\n for i in range(n):\n for j in range(n):\n if cluster_indices[i] == cluster_indices[j]:\n Q += (A[i][j] - row_sums[i] * row_sums[j] / (2*m)) / (2*m)\n return Q", "def get_percentile(data_list, score, kind='weak'):\n n = len(data_list)\n\n if kind == 'strict':\n return len([i for i in data_list if i < score]) / float(n) * 100\n elif kind == 'weak':\n return len([i for i in data_list if i <= score]) / float(n) * 100\n elif kind == 'mean':\n return (len([i for i in data_list if i < score]) + len([i for i in data_list if i <= score])) * 50 / float(n)\n else:\n raise ValueError(\"The kind kwarg must be 'strict', 'weak' or 'mean'. You can also opt to leave it out and rely on the default method.\")", "def cluster_partition_distance(individual, test_data, truth_data, name=None):\r\n distance_sum = 0\r\n max_sum = 0\r\n for test_clusters, truth_clusters in zip(test_data, truth_data):\r\n # Get last column of target data\r\n test_clusters = test_clusters[-1].flatten()\r\n\r\n p1_dict = {}\r\n for i, x in enumerate(test_clusters):\r\n if x not in p1_dict:\r\n p1_dict[x] = []\r\n p1_dict[x].append(i)\r\n\r\n p2_dict = {}\r\n for i, x in enumerate(truth_clusters):\r\n if x not in p2_dict:\r\n p2_dict[x] = []\r\n p2_dict[x].append(i)\r\n\r\n p1 = list(p1_dict.values())\r\n p2 = list(p2_dict.values())\r\n d = _fast_partition_distance(p1, p2, len(test_clusters))\r\n if d is None:\r\n d = _partition_distance(p1, p2, len(test_clusters))\r\n distance_sum += d\r\n max_sum += len(test_clusters) - 1\r\n return distance_sum / max_sum", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels", "def avarage_for_group(data: Dict[int, int]) -> float:\n values = data.values()\n summary = sum(values)\n return summary // len(data)", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def compute_means(self):\n 
###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def calc_accuracy(truth, guess, n):\r\n if len(truth) == len(guess):\r\n guess = _reorder_clusters(truth, guess, intersect_allowed=False)\r\n\r\n num_correct = 0\r\n for i in range(len(truth)):\r\n num_correct += len(truth[i].intersection(guess[i]))\r\n return 100.0 * (num_correct/n)\r\n return 0.0" ]
[ "0.68239075", "0.66668284", "0.6481002", "0.6474694", "0.6268896", "0.6198322", "0.61978954", "0.6153407", "0.6145863", "0.61173046", "0.6088561", "0.6067123", "0.60514313", "0.605035", "0.60220045", "0.6007003", "0.59786797", "0.5954374", "0.5944898", "0.591898", "0.5904012", "0.58934736", "0.5888239", "0.58542675", "0.58466184", "0.58107376", "0.57912296", "0.5785383", "0.5767912", "0.57631725", "0.57616186", "0.57545954", "0.5744635", "0.57412815", "0.5735015", "0.5714543", "0.5708779", "0.5708463", "0.57064724", "0.5692788", "0.5682287", "0.56805253", "0.5661653", "0.5643415", "0.5630227", "0.5623516", "0.56189984", "0.5607578", "0.5595044", "0.55923426", "0.5582394", "0.5580711", "0.5579882", "0.557808", "0.5571734", "0.556507", "0.5545937", "0.5536173", "0.5532594", "0.5531841", "0.5530475", "0.5513373", "0.5511623", "0.5507246", "0.5507121", "0.5496146", "0.549497", "0.5486816", "0.5482418", "0.5481227", "0.5477074", "0.54646385", "0.5461252", "0.5459336", "0.5457425", "0.5452532", "0.5450086", "0.543752", "0.54371226", "0.5436107", "0.54346436", "0.54304904", "0.5428129", "0.54216015", "0.54107815", "0.5406267", "0.54025877", "0.53959966", "0.53912663", "0.53885216", "0.5383635", "0.5382499", "0.53818893", "0.5375736", "0.53704715", "0.5369408", "0.53573924", "0.5354119", "0.5350135", "0.53449774" ]
0.7220413
0
Returns the maximum number of clusters needed to have a percent of the total number of clustered elements.
def analysis_function_num_clusters_to_percent(self,clustering,percent):
        return clustering.number_of_clusters_to_get_percent(percent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i", "def n_clusters(self):\n return len(self.clusters)", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def n_clusters(self):\n return self.model.n_clusters", "def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def analysis_function_top_4(self,clustering):\n clustering.sort_clusters_by_size()\n total = 0\n percents = clustering.get_population_percent_of_n_bigger_clusters(4)\n for p in percents:\n total = total+p\n return total", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def compute_total_bipartiteness(hypergraph, clusters):\n bipartiteness_sum = 0\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n bipartiteness_sum += hypergraph_bipartiteness(hypergraph, clusters[i], clusters[j])\n return bipartiteness_sum", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def total_max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_max_node_count\")", "def getNbClusters( model):\r\n\r\n\tlabels = model.labels_\r\n\tlabelValues = []\r\n\tfor label in labels:\r\n\t\tif label not in labelValues and label != -1: labelValues.append(label)\r\n\tnbClusters = len( labelValues)\r\n\treturn nbClusters", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def max_node_count(self) -> int:\n return pulumi.get(self, 
\"max_node_count\")", "def get_susceptibility(clusters):\n\n # If there is no or only one cluster then there is no finite cluster\n if len(clusters) <= 1:\n return np.nan\n\n # Remove largest, i.e. infinite, cluster\n clusters.remove(max(clusters))\n\n sizes = np.array(list(set(clusters)))\n n_s = []\n\n for size in sizes:\n n_s.append(clusters.count(size))\n\n temp = sizes * n_s\n S = np.sum(sizes * temp) / np.sum(temp)\n\n return S", "def maxCluster(clusters):\n try:\n return clusters[np.argmax(map(len, clusters))]\n except:\n return None", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def get_number_of_clusters(df, use_pca, n_components):\n n_clusters = 10\n cluster_with_distances = []\n for i in range(n_clusters):\n pipe = _build_model(df, use_pca, n_components, use_kmeans=True, n_clusters=i + 1)\n cluster_with_distances.append(pipe.named_steps['kmeans'].inertia_)\n plt.figure(6, figsize=(12, 6))\n plt.plot(range(1, 11), cluster_with_distances, 'o')\n plt.plot(range(1, 11), cluster_with_distances, '-', alpha=0.5)\n plt.title('The Elbow Criterion')\n plt.xlabel('number of cluster')\n plt.ylabel('Sum of squared distances of samples to their closest cluster center')\n plt.show()", "def max_size(self):\n size = 1\n for idx in self.config.index_specs:\n size *= len(idx.distribution)\n return size", "def get_total_n_cpu(self) -> int:", "def get_nb_workers(ratio):\n return max(1, int(CPU_COUNT * ratio))", "def maxContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['largestContig']", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def max_edges_in(i, cluster_bounds, insertion_factor):\n # for j in range(1, len(cluster_bounds)):\n # if(i < cluster_bounds[j] and i >= cluster_bounds[j-1]):\n # break\n bounds = get_cluster_bounds(i, cluster_bounds)\n #n_c = cluster_bounds[j] - cluster_bounds[j-1]\n n_c = bounds[1] - bounds[0]\n return np.int64(n_c * insertion_factor)", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def max_cardinality():\r\n #create a list containing the number of each vertex involvement.\r\n array = []\r\n for i in adj:\r\n array += [i[0],i[1]]\r\n\r\n #compute the degree by counting the involment\r\n degree = Counter(array).most_common()\r\n\r\n #retrieve the degree only\r\n degree_ = [ i[1] for i in degree]\r\n\r\n degree_ = np.array(degree_)\r\n \r\n max_m = None\r\n \r\n #check if m is valid\r\n for i in range(degree[0][1]+2)[2:]:\r\n \r\n #valid if there are at least m vertex with degree equals to at least m-1 \r\n if i < len(np.where(degree_>=i-1)[0]):\r\n max_m = i\r\n else:\r\n break\r\n max_m += 1\r\n print(f'maximum possible clique cardinality :{max_m}')\r\n return max_m", "def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def get_cluster_to_split(clusters):\n\treturn max(clusters.items(), key=lambda x: x[1].get_distortion())[1]", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def get_p_inf(clusters, shape):\n\n if len(clusters) == 0:\n return 0\n\n else:\n return max(clusters) / (shape[0] * shape[1])", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in 
genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def get_largest_cluster(self) -> tuple:\n flat_cluster = self.cluster.reshape(-1)\n true_clusters = np.extract(flat_cluster > 0, flat_cluster)\n counts = np.bincount(true_clusters)\n largest = np.argmax(counts)\n size = counts[largest]\n return largest, size", "def compute_cluster_class_fractions(k_means_model, y):\n\n n_classes = y.shape[1]\n class_labels = utils.one_hot_to_index(y)\n cluster_labels = k_means_model.labels_\n\n class_clustroid_counts = np.zeros((n_classes, K))\n for i in range(len(class_labels)):\n class_clustroid_counts[class_labels[i], cluster_labels[i]] += 1\n\n class_clustroid_fractions = class_clustroid_counts / np.sum(class_clustroid_counts, axis=1).reshape(n_classes, 1)\n\n print(\"\\n---- Class Clustroid Distribution ----\")\n for i in range(n_classes):\n print(\"Class {}: {}\".format(i, class_clustroid_fractions[i, :]))", "def get_lcc_size(G,seed_nodes):\n\n # getting subgraph that only consists of the black_nodes\n g = nx.subgraph(G,list(seed_nodes))\n\n if g.number_of_nodes() != 0:\n # get all components \n max_CC = max(nx.connected_component_subgraphs(g), key=len)\n return len(max_CC.nodes()) # size of largest connected component\"\n\n else:\n return 0", "def determine_jobs_per_pool(numpools, totaljobs):\n cluster = os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def get_sectors_per_cluster(self):\n\n\t\tsectors_per_cluster_base = struct.unpack('B', self.boot_sector_data[13 : 14])[0]\n\t\tif sectors_per_cluster_base == 0:\n\t\t\traise BootSectorException('Invalid cluster size (zero)')\n\n\t\tif sectors_per_cluster_base <= 0x80: # Although 0x80 is a signed value, it's used as an unsigned one.\n\t\t\tsectors_per_cluster_real = sectors_per_cluster_base\n\t\telse:\n\t\t\tsectors_per_cluster_base = struct.unpack('b', self.boot_sector_data[13 : 14])[0] # Read this again as a signed value.\n\t\t\tsectors_per_cluster_real = 1 << abs(sectors_per_cluster_base)\n\n\t\treturn sectors_per_cluster_real", "def get_typical_size(workers: List[List[int]]) -> int:\n size = 0\n for worker in workers:\n size = max([size,\n np.abs(worker[2]-worker[0]),\n np.abs(worker[3]-worker[1])])\n \n return size", "def max_num_neighbors(self):\n return self._max_num_neighbors", "def test_count_reads_per_cluster(self):\n \n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n total_reads, reads_per_cluster = count_reads_per_cluster(bedtool, None)\n \n self.assertListEqual([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36], reads_per_cluster)\n self.assertEqual(sum([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36]), total_reads)", "def find_count_divisor(this_list):\n max_found = this_list[0][1]\n count = 0\n\n while max_found/50 > 0:\n max_found -= 50\n count += 1\n\n return count", "def 
get_total_collisions(self):\n return self.count_collisions", "def largest_cc_size(ugraph):\n\tconnected = cc_visited(ugraph)\n\tmaxnum = 0\n\tfor content in connected:\n\t\tmaxnum = max(maxnum,len(content))\n\treturn maxnum", "def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()", "def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists", "def analysis_function_top_percent(self,clustering):\n clustering.sort_clusters_by_size()\n return clustering.get_population_percent_of_cluster(0)", "def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)", "def maximumDominationCount(leaf):\n maximumDominationCount = np.nanmax(leaf.calDominationCount())\n return maximumDominationCount", "def _get_max_gpu_processes(self):\n mem_usage = self._get_gpu_mem_usage()\n print('Mem Usage:', mem_usage)\n\n num_processes = int(1 / mem_usage)\n return num_processes", "def largest_cc_size(ugraph):\n if not ugraph:\n return 0\n return max(len(cc) for cc in cc_visited(ugraph))", "def node_size(graph):\n adj = nx.betweenness_centrality(graph)\n return np.array([x * 1e3 for x in adj.values()])", "def get_modularity(adjacency, clusters):\n total_weight = np.sum(adjacency)\n e = get_clusters_adjacencies(adjacency, clusters)\n e = e / total_weight\n a = np.sum(e, axis=1)\n return np.sum(e.diagonal() - np.power(a, 2))", "def __len__(self) -> int:\n\n length = self.n_classes * 100\n\n return length", "def getMaxMancount(self):\n return self.__size * 20", "def compute_optimal_block_maximum(block_counts) -> int:\n q1, q3 = compute_quartiles(block_counts)\n iqr = q3 - q1\n high_threshold = q3 + 1.5 * iqr\n return high_threshold", "def clustering_factor(self):\n return self.unpack_dword(0x2C)", "def maximum_number_of_workers(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def optimalBinSize(x):\n interquartile = np.diff(np.prctile(x, [25, 75]))\n return 2. 
* interquartile * len(x)**(-1./3)", "def calc_dim(s):\n s = s.detach().numpy()\n dim = 0\n # calculate how much 90% would be\n s_square = [i ** 2 for i in s]\n sum_square = sum(s_square)\n goal = .9 * sum_square\n # find 90%\n count = 0\n while count < goal:\n count += s_square[dim]\n dim += 1\n return dim # return this many dimensions", "def largest_cc_size(ugraph):\n ccomp = cc_visited(ugraph)\n if len(ccomp) == 0:\n return 0\n \n return max([len(s) for s in ccomp])", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def cluster_size(result, var):\n df=calculate_cluster_size(result, var)\n df['cus']=df.index\n return df", "def get_max_rows_per_partition() -> int:\n pass", "def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)", "def mem_per_core(self):\n return self.mem_per_node / self.cores_per_node", "def get_max_lb(self):\n max_lb = 0\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"lower_bound\"] > max_lb:\n max_lb = self.arc_info[arc][\"lower_bound\"]\n return max_lb", "def getCpuNum(self):\n return len(psutil.cpu_percent(interval=None, percpu=True))", "def berger_parker_d(counts):\n return counts.max()/float(counts.sum())", "def calculate_enrichment_factor_optimal(molecules, ranked_dataset_percentage_cutoff, pic50_cutoff):\n\n ratio = sum(molecules[\"pIC50\"] >= pic50_cutoff) / len(molecules) * 100\n if ranked_dataset_percentage_cutoff <= ratio:\n enrichment_factor_optimal = round(100 / ratio * ranked_dataset_percentage_cutoff, 1)\n else:\n enrichment_factor_optimal = 100.0\n return enrichment_factor_optimal", "def largest_cc_size(ugraph):\r\n\ttotal_list = cc_visited(ugraph)\r\n\tmax_length_list = []\r\n\tfor each_list in total_list:\r\n\t\tif len(max_length_list) < len(each_list):\r\n\t\t\tmax_length_list = each_list\r\n\treturn len(max_length_list)", "def num_largest_coeffs_for_energy_percent(a, p):\n # compute energies\n a = jnp.conj(a) * a\n # sort in descending order\n a = jnp.sort(a)[::-1]\n # total energy\n s = jnp.sum(a) * 1.\n # normalize\n a = a / s\n # convert to a cmf\n cmf = jnp.cumsum(a)\n # the quantile value\n q = (p - 1e-10) / 100\n # find the index\n index = jnp.argmax(cmf >= q)\n return index + 1", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def get_max_num_onsets():\r\n \"\"\" based on the numbers above, should equal to 932945... 
\"\"\"\r\n c1 = len(gen_onset_c1())\r\n c2 = len(gen_onset_c2())\r\n c3 = len(gen_onset_c3_c4())\r\n c4 = len(gen_onset_c3_c4())\r\n temp = c1\r\n temp = temp + ( c1 * c2 )\r\n temp = temp + ( c1 * c3 )\r\n temp = temp + ( c1 * c2 * c3 )\r\n temp = temp + ( c1 * c3 * c4 )\r\n temp = temp + ( c1 * c2 * c3 * c4 )\r\n return temp", "def max_node_size(self):\n return self.max_node_capacity", "def clustering_coefficient(graph):\r\n count = 0\r\n sumOfClusteringCoefficients = 0\r\n for vertex in graph:\r\n count += 1\r\n sumOfClusteringCoefficients += local_clustering_coefficient(graph, vertex)\r\n return sumOfClusteringCoefficients / count", "def largest_part_size():\n return usb_part_size(largest_partition())", "def control_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count_max or 0)", "def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)", "def _get_max_cpu_usage(self) -> Optional[int]:\n max_cpu_usage = self._get_cgroups_max_cpu_usage()\n if not max_cpu_usage:\n # if no cgroups limit is in place, then maximum possible cpu usage depends on the number of available cpus\n max_cpu_usage = psutil.cpu_count() * 1000000 # number of cpus * microseconds in one second\n return max_cpu_usage", "def local_clustering_coefficient(graph, vertex):\r\n edge_count = 0\r\n for neighbour1 in graph[vertex]:\r\n for neighbour2 in graph[vertex]: #look at each pair of neighbours of vertex\r\n if neighbour1 in graph[neighbour2]: #if the neighbours are joined to each other by an edge\r\n edge_count += 1 #add one to the edge count\r\n degree = len(graph[vertex]) #count how many neighbours vertex has\r\n return edge_count / (degree * (degree - 1)) #note factor of 2 missing as each edge counted twice\r", "def get_maxdist(self, pixel_size):\n\n total_area = self.minnpix_cluster*pixel_size**2.\n\n radius = ((np.sqrt(total_area)/2.))\n if radius > 1.0:\n radius = int(radius)\n else:\n radius = round_to_1(radius)\n dist = np.sqrt(2.*float(radius)**2.)\n dist = dist+(0.05*dist)\n\n return dist", "def n_cs(self):\n return np.size(self._cs, 0)", "def maximum_elastic_worker_count(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_elastic_worker_count\")", "def largest_bucket(self):\n size = 0\n for i in self.__buckets:\n if i.size() > size:\n size = i.size()\n return size", "def abs_max_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_cool_setpoint_limit\", 3200)", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def csize(grades):\n\tp = 0\n\tfor k in grades:\n\t\tl = _comb(n,k)\n\t\tp += l\n\treturn p", "def abs_max_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_heat_setpoint_limit\", 3000)", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def optimal_nr_of_hyperplanes(graph, L):\n\n max_degree = max(dict(graph.degree()).values())\n k = find_number_of_vector_colors_from_vector_coloring(graph, L)\n opt_nr_of_hyperplanes = 2\n try:\n opt_nr_of_hyperplanes = 2 + int(math.ceil(math.log(max_degree, k)))\n 
except ValueError:\n logging.info(\"math domain error\")\n\n return max(1, opt_nr_of_hyperplanes - 2)", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def get_optimal_clusters(cell,threshold=140):\n\n\t#\tTurn image to numpy array\n\tpic = image_to_matrix(cell)\n\n\t#\tGet the array of coordinates of dark dots\n\tdots = get_threshold_dots(pic,threshold)\n\n\tscores = []\n\n\tfor n_clusters in range(1,10):\n\t\tclusters = kmeans.kmeans(pic,pic.shape[0],pic.shape[1],50,n_clusters,threshold)\n\t\tprint clusters\n\n\t\tsquare_sum_array = [0]*n_clusters\n\t\tcount_array = [0]*n_clusters\n\n\t\tfor dot in dots:\n\t\t\tdistance_array = [kmeans.euclid_distance(dot,cluster) for cluster in clusters]\n\t\t\tmin_index = distance_array.index(min(distance_array))\n\t\t\tsquare_sum_array[min_index] += kmeans.euclid_distance(clusters[min_index],dot)\n\t\t\tcount_array[min_index] += 1\n\n\t\tvariances = [square_sum/(count+0.001) for square_sum, count in zip(square_sum_array,count_array)]\n\n\t\tprint variances\n\t\tscores.append(sum(variances)/len(variances))\n\n\treturn scores", "def nb_murs_total(self):\r\n murs_pleins=0\r\n for x in range(0,self.largeur):\r\n for y in range(0,self.hauteur):\r\n murs_pleins+=self.matrice_cases[x][y].nb_murs_pleins()\r\n \r\n return int((murs_pleins-self.hauteur*2-self.largeur*2)/2)", "def reduction_ratio(links_pred, *total):\n\n n_max = full_index_size(*total)\n\n if isinstance(links_pred, pandas.MultiIndex):\n links_pred = len(links_pred)\n\n if links_pred > n_max:\n raise ValueError(\"n has to be smaller of equal n_max\")\n\n return 1 - links_pred / n_max", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6" ]
[ "0.73421067", "0.72231865", "0.6814632", "0.6761012", "0.67437637", "0.6737779", "0.6622332", "0.66150904", "0.6535396", "0.649745", "0.64956445", "0.6430533", "0.64143646", "0.6354163", "0.63429755", "0.63404", "0.6339005", "0.632302", "0.6261867", "0.62327045", "0.62271553", "0.6199023", "0.61949694", "0.61724746", "0.61688817", "0.6168732", "0.6147854", "0.6138262", "0.6125837", "0.61169857", "0.6106808", "0.60965633", "0.60560846", "0.6038405", "0.60346556", "0.5998857", "0.5997318", "0.59752667", "0.5973794", "0.59673345", "0.5958641", "0.59400076", "0.59305626", "0.5922549", "0.5919258", "0.59191257", "0.5914993", "0.5908023", "0.5905588", "0.5884294", "0.588338", "0.5864176", "0.5857997", "0.5835185", "0.58074844", "0.58055365", "0.58001447", "0.5791224", "0.5785508", "0.5783607", "0.5775817", "0.5748888", "0.5743911", "0.57412124", "0.574053", "0.5727498", "0.56947994", "0.56703895", "0.5668831", "0.566237", "0.56609434", "0.56603837", "0.56581837", "0.5656916", "0.5654458", "0.5644952", "0.5643117", "0.5632182", "0.5628959", "0.5627479", "0.56190115", "0.5618644", "0.56151783", "0.5612925", "0.5612541", "0.56115025", "0.5608499", "0.5607013", "0.56026787", "0.5601381", "0.55999595", "0.55999595", "0.55995685", "0.559881", "0.5598412", "0.5596695", "0.559567", "0.5592091", "0.55809957", "0.55712235" ]
0.6772024
3
Returns the percent of elements, over the total number of elements of the clustering, that have been clustered into the biggest cluster.
def analysis_function_top_percent(self,clustering):
        clustering.sort_clusters_by_size()
        return clustering.get_population_percent_of_cluster(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analysis_function_num_clusters_to_percent(self,clustering,percent):\n return clustering.number_of_clusters_to_get_percent(percent)", "def analysis_function_top_4(self,clustering):\n clustering.sort_clusters_by_size()\n total = 0\n percents = clustering.get_population_percent_of_n_bigger_clusters(4)\n for p in percents:\n total = total+p\n return total", "def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i", "def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def compute_total_bipartiteness(hypergraph, clusters):\n bipartiteness_sum = 0\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n bipartiteness_sum += hypergraph_bipartiteness(hypergraph, clusters[i], clusters[j])\n return bipartiteness_sum", "def get_modularity(adjacency, clusters):\n total_weight = np.sum(adjacency)\n e = get_clusters_adjacencies(adjacency, clusters)\n e = e / total_weight\n a = np.sum(e, axis=1)\n return np.sum(e.diagonal() - np.power(a, 2))", "def get_susceptibility(clusters):\n\n # If there is no or only one cluster then there is no finite cluster\n if len(clusters) <= 1:\n return np.nan\n\n # Remove largest, i.e. 
infinite, cluster\n clusters.remove(max(clusters))\n\n sizes = np.array(list(set(clusters)))\n n_s = []\n\n for size in sizes:\n n_s.append(clusters.count(size))\n\n temp = sizes * n_s\n S = np.sum(sizes * temp) / np.sum(temp)\n\n return S", "def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))", "def compute_cluster_class_fractions(k_means_model, y):\n\n n_classes = y.shape[1]\n class_labels = utils.one_hot_to_index(y)\n cluster_labels = k_means_model.labels_\n\n class_clustroid_counts = np.zeros((n_classes, K))\n for i in range(len(class_labels)):\n class_clustroid_counts[class_labels[i], cluster_labels[i]] += 1\n\n class_clustroid_fractions = class_clustroid_counts / np.sum(class_clustroid_counts, axis=1).reshape(n_classes, 1)\n\n print(\"\\n---- Class Clustroid Distribution ----\")\n for i in range(n_classes):\n print(\"Class {}: {}\".format(i, class_clustroid_fractions[i, :]))", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)", "def percenter(rank, 
max_rank):\n\treturn 100 * (rank/(max_rank or 1))", "def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)", "def leaf_nodes_to_search_percent(self) -> Optional[int]:\n return pulumi.get(self, \"leaf_nodes_to_search_percent\")", "def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()", "def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)", "def _cluster_hitprobability(self, x, y):\n hm_count = np.zeros_like(y).astype(float)\n hm = np.zeros_like(y).astype(float)\n #skf = StratifiedShuffleSplit(n_splits=self.n_iter, test_size=self.shuffle_test_split, random_state=self.random_state)\n\n ind = self._cluster(x, 35)\n\n for cluster_id in np.unique(ind):\n test = np.argwhere(ind == cluster_id)[:, 0]\n train = np.argwhere(ind != cluster_id)[:, 0]\n #print test\n self.basemodel.fit(x[train, :], y[train], hyperparams_optim=False)\n hm_count[test] += 1.\n hm[test] += (self.basemodel.predict(x[test, :]) == y[test]).astype(float)\n\n proba = hm / hm_count\n if self.verbose:\n # print('H/M count:')\n # print(hm_count)\n print('Proba:')\n print(proba)\n self.basemodel.fit(x, y, hyperparams_optim=False)\n return proba", "def get_modularity_other_c(A, cluster_indices):\n # define the number of nodes in the graph and the number of clusters\n n = len(cluster_indices)\n nclusters = max(cluster_indices) + 1\n # define the row sums of the adjacency matrix\n row_sums = [sum(row) for row in A]\n # define one half of the sum of all entries in the adjacency matrix\n m = sum(row_sums) / 2.0\n # define the modularity\n Q = 0\n for i in range(n):\n for j in range(n):\n if cluster_indices[i] == cluster_indices[j]:\n Q += (A[i][j] - row_sums[i] * row_sums[j] / (2*m)) / (2*m)\n return Q", "def compute_clusters(self, p: float):\n pass", "def analysis_function_mean_cluster_size(self,clustering):\n sizes = get_cluster_sizes(clustering.clusters)[1]\n return numpy.mean(sizes)", "def getBeliefDistribution(self):\n # This essentially gives a point to a location for each particle there, then \n # normalizes the point values so they add up to 1.\n dist = util.Counter()\n for part in self.particles: dist[part] += 1\n dist.normalize()\n return dist", "def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def inter_cost(cluster):\n def _p2p(point):\n _freq_sum = 0\n for pt in cluster.points:\n if point != 
pt:\n _freq_sum += point.frequency(pt)\n return _freq_sum\n\n return int(sum(map(_p2p, cluster.points)))", "def describe_reduce_nb(col, a, perc, ddof, *args):\n a = a[~np.isnan(a)]\n out = np.empty(5 + len(perc), dtype=np.float_)\n out[0] = len(a)\n if len(a) > 0:\n out[1] = np.mean(a)\n out[2] = nanstd_1d_nb(a, ddof=ddof)\n out[3] = np.min(a)\n out[4:-1] = np.percentile(a, perc * 100)\n out[4 + len(perc)] = np.max(a)\n else:\n out[1:] = np.nan\n return out", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")", "def occupation_distribution(data):", "def __cluster_simi(self, i, j):\n sum_ = 0.\n for si in self.__indexclusters[i]:\n for sj in self.__indexclusters[j]:\n simi = self.__sample_simi(si, sj)\n sum_ += simi\n return sum_ / (len(self.__indexclusters[i]) * len(self.__indexclusters[j]))", "def _calculate_cluster_probs(self, dist_mat, temperature):\n dist_mat = np.square(dist_mat) # euclidean distance -> squared euclidean distance\n n_samples = dist_mat.shape[0]\n n_clusters = dist_mat.shape[1]\n cluster_probs = np.zeros(shape=(n_samples, n_clusters))\n for i in range(n_clusters):\n for j in range(n_samples):\n normalizer = sum(self.marginal_probs[k] * np.exp(-dist_mat[j, k] / temperature) for k in range(n_clusters))\n cluster_probs[j, i] = self.marginal_probs[i] * np.exp(-dist_mat[j, i] / temperature) / normalizer\n return cluster_probs", "def get_modularity3(adjacency, clusters):\n\n rows, cols = adjacency.shape\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n degrees = np.sum(adjacency, axis=1)\n total_weight = np.sum(adjacency)\n sum = 0\n for i in range(rows):\n for j in range(cols):\n if id_to_cluster[i] == id_to_cluster[j]:\n sum += adjacency[i, j] - (degrees[i] * degrees[j]) / total_weight\n sum = sum / total_weight\n return sum", "def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion", "def estimate_label_proportion(source_loader,target_loader,feat_extract,cuda,n_clusters,cluster_param): \n feat_extract.eval()\n #n_clusters = 3\n from sklearn.cluster import AgglomerativeClustering\n \n \n X_s,y_s = extract_feature(source_loader,feat_extract,cuda) \n X_t,y_t = extract_feature(target_loader,feat_extract,cuda) \n \n \n \n cluster = AgglomerativeClustering(n_clusters=n_clusters,linkage=cluster_param)\n label_t = cluster.fit_predict(X_t)\n #print(np.unique(label_t))\n mean_mat_S, num_in_class_S = extract_prototypes(X_s,y_s,n_clusters)\n mean_mat_T, num_in_class_T = extract_prototypes(X_t,label_t,n_clusters)\n \n \"\"\"\n We assume that 
prototypes of classes have been transported in some in the feature\n space \n \"\"\"\n \n import ot\n M = ot.dist(mean_mat_S, mean_mat_T)\n M /= M.max()\n \n n_1 = n_clusters\n a = np.ones((n_1,)) / n_1\n b = np.ones((n_1,)) / n_1\n \n \n gamma = ot.emd(a,b,M)\n nb_sample_S = [ np.sum(y_s==i) for i in range(n_clusters) ]\n proportion_T = num_in_class_T/np.sum(num_in_class_T)\n assignement_source_to_target = gamma.argmax(axis=1)\n \n # proportions are arranged directly per class\n proportion_T = proportion_T[assignement_source_to_target]\n print(proportion_T,assignement_source_to_target)\n \n\n return proportion_T,nb_sample_S, assignement_source_to_target", "def clustering_coefficient(graph):\r\n count = 0\r\n sumOfClusteringCoefficients = 0\r\n for vertex in graph:\r\n count += 1\r\n sumOfClusteringCoefficients += local_clustering_coefficient(graph, vertex)\r\n return sumOfClusteringCoefficients / count", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))", "def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass", "def get_cluster_to_split(clusters):\n\treturn max(clusters.items(), key=lambda x: x[1].get_distortion())[1]", "def d50(clones, num_Reads): \n\n\n d50_amount = num_Reads/2\n read_count=0\n for i in clones:\n read_count+=clones[i].num_reads\n if read_count>=d50_amount:\n return i/float(len(clones))", "def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * 
(n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def print_cluster(self, cluster, value):\n total = 0\n ham = 0\n spam = 0\n for message in cluster:\n if self.spamorham[self.ids[message]] == 'ham':\n ham += 1\n elif self.spamorham[self.ids[message]] == 'spam':\n spam += 1\n else:\n print(\"ERROR!\")\n total += 1\n\n print(\"Total number of messages in the {0} cluster: {1}\\n\"\n \"Percentage of SPAM messages in the {2} cluster: {3}\\n\"\n \"Percentage of HAM messages in the {4} cluster: {5}\".format(value, total, value,\n str((float(spam) / total) * 100), value,\n str((float(ham) / total) * 100)))", "def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def _overlap_energy(self, this, that):\n if not this.overlaps(that):\n return 0.0\n\n return min(10.0 / this.rank, 10.0 / that.rank)", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def get_optimal_clusters(cell,threshold=140):\n\n\t#\tTurn image to numpy array\n\tpic = image_to_matrix(cell)\n\n\t#\tGet the array of coordinates of dark dots\n\tdots = get_threshold_dots(pic,threshold)\n\n\tscores = []\n\n\tfor n_clusters in range(1,10):\n\t\tclusters = kmeans.kmeans(pic,pic.shape[0],pic.shape[1],50,n_clusters,threshold)\n\t\tprint clusters\n\n\t\tsquare_sum_array = [0]*n_clusters\n\t\tcount_array = [0]*n_clusters\n\n\t\tfor dot in dots:\n\t\t\tdistance_array = [kmeans.euclid_distance(dot,cluster) for cluster in clusters]\n\t\t\tmin_index = distance_array.index(min(distance_array))\n\t\t\tsquare_sum_array[min_index] += kmeans.euclid_distance(clusters[min_index],dot)\n\t\t\tcount_array[min_index] += 1\n\n\t\tvariances = [square_sum/(count+0.001) for square_sum, count in zip(square_sum_array,count_array)]\n\n\t\tprint variances\n\t\tscores.append(sum(variances)/len(variances))\n\n\treturn scores", "def percentage_hapaxes(corpus_parts, corpus):\n percentage_h = []\n count = 0\n dv = divide_corpus(corpus, 10)\n hapax_parts = hapaxes_parts(corpus_parts)\n for x in hapax_parts:\n percentage_h.append(percentage(x, len(dv[count])))\n count += 1\n return percentage_h", "def disaggregate_by_cluster(self):\n # wt = np.zeros((1, self.ds.shape[1]))\n # total = np.zeros((self.n_ahead, self.ds.shape[1]))\n \n agg_cluster_ds = np.zeros((self.n_ahead+1, self.n_clusters))\n agg_cluster_ds[0] = self.ds_agg_by_c[-1]\n agg_cluster_ds[1:] = self.ds_c_for\n cluster_perc_change = np.diff(agg_cluster_ds, axis = 0) / agg_cluster_ds[:-1]\n\n cluster_scaling_vector = np.zeros((2, self.ds.shape[1]))\n\n # break down proportionally -> don't work well\n # for c in range(self.n_clusters):\n # c_m = self.ds.iloc[-self.cluster_n_period:, np.where(self.ds_c == 
c)[0]]\n # c_sum = sum(c_m)\n # indiv_sum = np.sum(c_m, axis = 0)\n # wt[:,np.where(self.ds_c == c)[0]] = (indiv_sum/c_sum)\n # total[:,np.where(self.ds_c == c)[0]] = np.reshape(\n # np.repeat(self.ds_c_for[:,c], c_m.shape[1]), (self.n_ahead, c_m.shape[1]))\n \n # multiply by the perc change\n \n for i in range(self.ds_c.shape[0]):\n cluster_scaling_vector[:,i] = cluster_perc_change[:,self.ds_c[i]]\n cluster_scaling_vector = cluster_scaling_vector+1\n cluster_scaling_vector = np.array(cluster_scaling_vector)\n \n self.ds_for = self.ds.copy()\n\n for yr in range(self.n_ahead)[::-1]:\n # forecast on foretasted number\n yr_ind = self.ds_for.index[-(yr+1)]\n self.ds_for.ix[yr_ind] = self.ds_for.iloc[-(yr+2),:].values * cluster_scaling_vector[-(yr+1)]\n\n # self.ds_for.iloc[-(self.n_ahead):,:] = self.ds_for.iloc[-(self.n_ahead+1):-1,:].values * np.array(cluster_scaling_vector)\n\n # if negative -> 0\n self.ds_for[self.ds_for < 0] = 0", "def dim_reduction( M ):\n tot_count_per_type = M.sum(axis = 1)\n tot_count = float(tot_count_per_type.sum())\n sorted_index = np.argsort(tot_count_per_type)\n threshold = 0.01\n accu = 0\n for i in range(len(sorted_index)):\n perc = float(tot_count_per_type[sorted_index[i]])/tot_count\n accu = accu + perc\n if accu > threshold:\n break;\n \n return sorted_index[0:i]", "def in_xi(self, sample, cluster, cluster_size):\n sum_distance = 0\n for cur_sample in cluster.get_samples():\n if sample.get_s_id() != cur_sample.get_s_id():\n if cur_sample.get_s_id() < sample.get_s_id():\n sum_distance += self.distance_dict[(cur_sample.get_s_id(), sample.get_s_id())]\n elif cur_sample.get_s_id() > sample.get_s_id():\n sum_distance += self.distance_dict[(sample.get_s_id(), cur_sample.get_s_id())]\n return sum_distance / (cluster_size - 1)", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def calc_sw(X, cluster_labels):\n\n labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Sw = []\n for label in labels_set:\n # Loop through each cluster and calculate within cluster distance\n pairs = np.where(labels == label)\n pairs_distance = pdist(X[pairs[0]])\n within_cluster_distance = np.sum(pairs_distance, axis=0)\n Sw.append(within_cluster_distance)\n\n return np.sum(Sw)", "def cluster_mcc_ratio(result, cluster_names, var, n=5):\n rel1=get_cluster_country_distr(result, var)\n clusters=calculate_cluster_size(result, var)\n hours=hours_tusc(result, var)\n res=\"\"\n for i in zip(clusters.index, cluster_names[:len(clusters)]):\n res=res+f\"By the number of unique visitors the {i[1]} cluster's top 5 countries are; \"\n rel=rel1.sort_values(i[0],ascending=False)[:n]\n for j in range(0,5):\n if j!=n-1:\n res=res+f'{rel[i[0]].index[j]} ({rel[i[0]][j]}%), '\n else:\n res=res+f'and {rel[i[0]].index[j]} ({rel[i[0]][j]}%). 
'\n res=res+f'This cluster spends on average {int(hours.hrs_in_tusc[i[0]])} days in Tuscany, '\n res=res+get_places_at_least4_hours(result, i[0], var)\n res=res+ cluster_airport_result(result, i[0], var)\n return res", "def percentage(self):\n temp = self.cpu_freq_time_spent.copy()\n for i in self.cpu_freq_time_spent:\n total = 0\n for j in self.cpu_freq_time_spent[i]:\n total += self.cpu_freq_time_spent[i][j]\n for j in self.cpu_freq_time_spent[i]:\n if total != 0:\n temp[i][j] = self.cpu_freq_time_spent[i][j] * 100 / total\n else:\n temp[i][j] = 0\n return temp", "def test_count_reads_per_cluster(self):\n \n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n total_reads, reads_per_cluster = count_reads_per_cluster(bedtool, None)\n \n self.assertListEqual([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36], reads_per_cluster)\n self.assertEqual(sum([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36]), total_reads)", "def GlobalClusteringCoefficient(graph):\n coef = np.mean(list(nx.clustering(graph).values()))\n return coef", "def diversion_score(X, offspring_list):\r\n similarity_sum = 0\r\n if len(offspring_list[0]) == 2:\r\n offspring_list = [(parent_a, offspring, parent_a) for (parent_a, offspring) in offspring_list]\r\n for (parent_a, offspring, parent_b) in offspring_list:\r\n similarity_sum += max(icc(parent_a, offspring), icc(parent_b, offspring))\r\n return (1 - (((similarity_sum / len(offspring_list)) + 1) / 2)) * 100 # move from [-1,1] to [0,2], then to [0,1], then inverse, finally move to [0,100]\r", "def _calculate_score(lsh, minhash, total_num_events):\n neighbours = lsh.query(minhash)\n return float(len(neighbours)) / float(total_num_events)", "def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio", "def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val", "def percent_overlap(items1, items2, k = None):\n if k is None:\n k = max([len(items1), len(items2)])\n assert k > 0 and k <= max([len(items1), len(items2)]), 'k is out of bounds!'\n items1_set, items2_set = set(items1[:k]), set(items2[:k])\n return len(items1_set & items2_set) / len(items1_set | items2_set)", "def percentage(count, total):\n return count / total * 100", "def cluster_size(result, var):\n df=calculate_cluster_size(result, var)\n df['cus']=df.index\n return df", "def calculate_cluster_silhouette(self, cluster):\n cluster_size = len(cluster.get_samples())\n return self.sum_silhouette(cluster) / cluster_size", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def get_sectors_per_cluster(self):\n\n\t\tsectors_per_cluster_base = struct.unpack('B', self.boot_sector_data[13 : 14])[0]\n\t\tif sectors_per_cluster_base == 0:\n\t\t\traise BootSectorException('Invalid cluster size (zero)')\n\n\t\tif sectors_per_cluster_base <= 0x80: # Although 
0x80 is a signed value, it's used as an unsigned one.\n\t\t\tsectors_per_cluster_real = sectors_per_cluster_base\n\t\telse:\n\t\t\tsectors_per_cluster_base = struct.unpack('b', self.boot_sector_data[13 : 14])[0] # Read this again as a signed value.\n\t\t\tsectors_per_cluster_real = 1 << abs(sectors_per_cluster_base)\n\n\t\treturn sectors_per_cluster_real", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def valency(self):\n return len(self.neighbors())", "def _avg_cluster_hitprobability(self, x, y, n_clusters=30):\n\n # Compute the individual Hit probability\n proba = self._shufflesplit(x, y)\n\n # average the individual hit probability for each cluster\n ind = self._cluster(x, x.shape[0]/2.)\n\n avg_proba = np.copy(proba)\n\n for cluster in np.unique(ind):\n mask_ = ind == cluster\n avg_proba[mask_] = avg_proba[mask_].mean()\n\n return avg_proba", "def cluster_partition_distance(individual, test_data, truth_data, name=None):\r\n distance_sum = 0\r\n max_sum = 0\r\n for test_clusters, truth_clusters in zip(test_data, truth_data):\r\n # Get last column of target data\r\n test_clusters = test_clusters[-1].flatten()\r\n\r\n p1_dict = {}\r\n for i, x in enumerate(test_clusters):\r\n if x not in p1_dict:\r\n p1_dict[x] = []\r\n p1_dict[x].append(i)\r\n\r\n p2_dict = {}\r\n for i, x in enumerate(truth_clusters):\r\n if x not in p2_dict:\r\n p2_dict[x] = []\r\n p2_dict[x].append(i)\r\n\r\n p1 = list(p1_dict.values())\r\n p2 = list(p2_dict.values())\r\n d = _fast_partition_distance(p1, p2, len(test_clusters))\r\n if d is None:\r\n d = _partition_distance(p1, p2, len(test_clusters))\r\n distance_sum += d\r\n max_sum += len(test_clusters) - 1\r\n return distance_sum / max_sum", "def hausd95(result, reference, voxelspacing=None, connectivity=1):\n hd1 = __surface_distances(result, reference, voxelspacing, connectivity)\n hd2 = __surface_distances(reference, result, voxelspacing, connectivity)\n hd95 = np.percentile(np.hstack((hd1, hd2)), 95)\n return hd95", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n 
(cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. \n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6", "def calculate_enrichment_factor(enrichment, ranked_dataset_percentage_cutoff):\n\n # Keep only molecules that meet the cutoff\n enrichment = enrichment[\n enrichment[\"% ranked dataset\"] <= ranked_dataset_percentage_cutoff / 100\n ]\n # Get highest percentage of actives and the corresponding percentage of actives\n highest_enrichment = enrichment.iloc[-1]\n enrichment_factor = round(100 * float(highest_enrichment[\"% true actives identified\"]), 1)\n return enrichment_factor", "def media(self):\n self.kmeans = [[] for i in range(0,self.cluster_number)]\n for i in range(self.cluster_number):\n for j in range(0,len(self.cluster[i][0])):\n self.kmeans[i].append(np.sum(self.cluster[i][::,j:j+1:])/len(self.cluster[i][::,j:j+1:]))", "def modularity():\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q", "def lscoreatpercentile (inlist, percent):\r\n if percent > 1:\r\n print \"\\nDividing percent>1 by 100 in lscoreatpercentile().\\n\"\r\n percent = percent / 100.0\r\n targetcf = 
percent*len(inlist)\r\n h, lrl, binsize, extras = histogram(inlist)\r\n cumhist = cumsum(copy.deepcopy(h))\r\n for i in range(len(cumhist)):\r\n if cumhist[i] >= targetcf:\r\n break\r\n score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)\r\n return score", "def compute_distortion(cluster_list, data_table):\n\tdistortion = 0\n\tfor cluster in cluster_list:\n\t\tdistortion += cluster.cluster_error(data_table)\n\treturn distortion", "def local_clustering_coefficient(graph, vertex):\r\n edge_count = 0\r\n for neighbour1 in graph[vertex]:\r\n for neighbour2 in graph[vertex]: #look at each pair of neighbours of vertex\r\n if neighbour1 in graph[neighbour2]: #if the neighbours are joined to each other by an edge\r\n edge_count += 1 #add one to the edge count\r\n degree = len(graph[vertex]) #count how many neighbours vertex has\r\n return edge_count / (degree * (degree - 1)) #note factor of 2 missing as each edge counted twice\r", "def compute_distortion(cluster_list, data_table):\r\n distortion = 0\r\n \r\n for cluster in cluster_list:\r\n distortion += cluster.cluster_error(data_table)\r\n\r\n return distortion", "def _calc_perc(arr: np.array, p: Sequence[float] = None):\n if p is None:\n p = [50]\n\n nan_count = np.isnan(arr).sum(axis=-1)\n out = np.moveaxis(np.percentile(arr, p, axis=-1), 0, -1)\n nans = (nan_count > 0) & (nan_count < arr.shape[-1])\n if np.any(nans):\n out_mask = np.stack([nans] * len(p), axis=-1)\n # arr1 = arr.reshape(int(arr.size / arr.shape[-1]), arr.shape[-1])\n # only use nanpercentile where we need it (slow performance compared to standard) :\n out[out_mask] = np.moveaxis(\n np.nanpercentile(arr[nans], p, axis=-1), 0, -1\n ).ravel()\n return out", "def berger_parker_d(counts):\n return counts.max()/float(counts.sum())", "def n_percentage_part(percentage_level, counted_od):\n total = 0.0\n od_num = 0\n percentage_od = count_to_percentage(counted_od)\n if percentage_level == 1.0:\n od_num = len(counted_od)\n else:\n for i in percentage_od:\n if total < percentage_level:\n total += i\n else:\n od_num = percentage_od.index(i)\n break\n return od_num", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def metric_value(x, y):\n return (sum(np.all(x == y, axis=1) * 1) / len(x)) * 100", "def gap_statistic(X, cluster_nums, ref_num=10, cluster_method=None):\n if cluster_method is None:\n def k_means(data, cluster_nums):\n \"\"\"\n http://scikit-learn.org/stable/modules/clustering.html#k-means\n \"\"\"\n from sklearn.cluster import KMeans\n\n labels_list = []\n for cluster_num in cluster_nums:\n kmeans = KMeans(cluster_num, random_state=0, n_init=10).fit(data)\n labels_list.append(kmeans.labels_ + 1)\n print('KMeans finished: {}'.format(cluster_num))\n return labels_list\n\n cluster_method = k_means\n\n print('Start: calculate W\\u2096s')\n Wks = []\n labels_list = cluster_method(X, cluster_nums)\n for labels in labels_list:\n Wks.append(elbow_score(X, labels))\n Wks = np.array(Wks)\n Wks_log = np.log(Wks)\n print('Finish: calculate W\\u2096s')\n\n print(\"Start: calculate references' W\\u2096s\")\n Wks_refs_log = []\n minimums = np.atleast_2d(np.min(X, axis=0))\n maximums = np.atleast_2d(np.max(X, axis=0))\n bounding_box = np.r_[minimums, maximums]\n for i in range(ref_num):\n X_ref = uniform_box_sampling(X.shape[0], 
bounding_box)\n labels_list_ref = cluster_method(X_ref, cluster_nums)\n Wks_ref_log = []\n for labels in labels_list_ref:\n Wks_ref_log.append(np.log(elbow_score(X_ref, labels)))\n Wks_refs_log.append(Wks_ref_log)\n print('Finish reference: {}/{}'.format(i+1, ref_num))\n print(\"Finish: calculate references' W\\u2096s\")\n\n print('Start: calculate gaps')\n Wks_refs_log = np.array(Wks_refs_log)\n Wks_refs_log_mean = np.mean(Wks_refs_log, axis=0)\n Wks_refs_log_std = np.std(Wks_refs_log, axis=0)\n gaps = Wks_refs_log_mean - Wks_log\n print('Finish: calculate gaps')\n\n print('Start: select optimal k')\n s = Wks_refs_log_std * np.sqrt(1 + 1.0 / ref_num)\n idx_selected = np.where(gaps[:-1] >= gaps[1:] - s[1:])[0][0]\n k_selected = cluster_nums[idx_selected]\n print('Finish: select optimal k')\n\n return labels_list, Wks, Wks_refs_log_mean, gaps, s, k_selected", "def compute_sufficient_stats(self):\n self.counts = (np.sum(self.resp, axis=0) + 10e-30)\n # print(self.counts)\n for k in range(self.k):\n self.means[k] = np.sum(self.resp[n, k] * self.x[n] for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.sum(self.resp[n, k] * (self.x[n] - self.means[k]) @ (self.x[n] - self.means[k]).T\n for n in range(self.n)) / self.counts[k]\n self.covars[k] = np.nan_to_num(self.covars[k])\n self.means[k] = np.nan_to_num(self.means[k])", "def fraction_mislabeled_nodes(labels, labels_pred):\n G1 = partition_indicator(labels)\n G2 = partition_indicator(labels_pred)\n\n # cost is minimized, overlap maximized\n cost_matrix = -G1.T.dot(G2)\n row_ind, col_ind = linear_sum_assignment(cost_matrix.A)\n cost = -cost_matrix[row_ind, col_ind].sum()\n\n return 1 - (cost / len(labels))", "def clusterValues( values, relS=0.1 , refScaleAbs='range' ):\n if len(values)==0:\n return []\n if len(values.shape)==1:\n sortedV = numpy.stack([ values , numpy.arange(len(values))] ,1)\n else:\n # Assume value.shape = (N,2) and index are ok\n sortedV = values \n sortedV = sortedV[ numpy.argsort(sortedV[:,0]) ]\n\n sortedVV = sortedV[:,0]\n refScale = sortedVV[-1]-sortedVV[0]\n #sortedVV += 2*min(sortedVV)) # shift to avoid numerical issues around 0\n\n #print sortedVV\n class Cluster:\n def __init__(self, delta, sum, indices):\n self.delta = delta\n self.sum = sum\n self.N=len(indices)\n self.indices = indices\n def size(self):\n return self.delta/refScale\n \n def combine(self, c):\n #print ' combine ', self.indices[0], c.indices[-1], ' -> ', sortedVV[c.indices[-1]] - sortedVV[self.indices[0]]\n newC = Cluster(sortedVV[c.indices[-1]] - sortedVV[self.indices[0]],\n self.sum+c.sum,\n self.indices+c.indices)\n return newC\n\n def originIndices(self):\n return tuple(int(sortedV[i][1]) for i in self.indices)\n\n def size_local(self):\n return self.delta / sum( sortedVV[i] for i in self.indices) *len(self.indices)\n def size_range(self):\n return self.delta/refScale\n def size_abs(self):\n return self.delta\n\n if refScaleAbs=='range':\n Cluster.size = size_range\n elif refScaleAbs=='local':\n Cluster.size = size_local\n elif refScaleAbs=='abs':\n Cluster.size = size_abs\n \n class ClusterPair:\n next=None\n prev=None\n def __init__(self, c1, c2 ):\n self.c1=c1\n self.c2=c2\n self.refresh()\n def refresh(self):\n self.potentialC =self.c1.combine(self.c2)\n self.size = self.potentialC.size()\n def setC1(self, c1):\n self.c1=c1\n self.refresh()\n def setC2(self, c2):\n self.c2=c2\n self.refresh()\n \n #ave = 0.5*(sortedVV[1:,0]+sortedV[:-1,0])\n #deltaR = (sortedV[1:,0]-sortedV[:-1,0])/ave\n\n cList = [Cluster(0,v,(i,)) for (i,v) in 
enumerate(sortedVV) ]\n cpList = [ ClusterPair( c, cList[i+1] ) for (i,c) in enumerate(cList[:-1]) ]\n resetPrevNextSegment( cpList )\n\n #print cpList\n def reduceCL( cList ):\n if len(cList)<=1:\n return cList\n cp = min(cList, key=lambda cp:cp.size) \n #print '==', cp.size , relS, cp.c1.indices , cp.c2.indices, cp.potentialC.indices\n\n while cp.size < relS:\n if cp.next:\n cp.next.setC1(cp.potentialC)\n cp.next.prev = cp.prev\n if cp.prev:\n cp.prev.setC2(cp.potentialC)\n cp.prev.next = cp.next\n cList.remove(cp)\n if len(cList)<2:\n break\n cp = min(cList, key=lambda cp:cp.size) \n #print ' -----> ', [ (cp.c1.indices , cp.c2.indices) for cp in cList]\n return cList\n\n cpList = reduceCL(cpList)\n if len(cpList)==1:\n cp = cpList[0]\n if cp.potentialC.size()<relS:\n return [ cp.potentialC.originIndices() ]\n #print cpList\n if cpList==[]:\n return []\n finalCL = [ cp.c1.originIndices() for cp in cpList ]+[ cpList[-1].c2.originIndices() ]\n return finalCL" ]
[ "0.7153438", "0.704848", "0.6598043", "0.65899014", "0.65830696", "0.6483606", "0.6449112", "0.6421873", "0.63327456", "0.62887275", "0.6227568", "0.6170871", "0.61646694", "0.6152447", "0.6065469", "0.6052555", "0.6034332", "0.60112214", "0.6000157", "0.5957048", "0.5954154", "0.59512043", "0.5926732", "0.5926423", "0.5921683", "0.5915802", "0.5901626", "0.5896818", "0.58966404", "0.58849305", "0.58721614", "0.587037", "0.5857933", "0.5845804", "0.5840319", "0.5836977", "0.5822742", "0.5812218", "0.5797974", "0.579589", "0.574924", "0.5745349", "0.5733645", "0.5727552", "0.57212174", "0.57196945", "0.57092184", "0.5706276", "0.5703501", "0.57015884", "0.5688196", "0.5687761", "0.56852114", "0.56756467", "0.567427", "0.5668201", "0.5641451", "0.5633625", "0.56327474", "0.5632171", "0.56264544", "0.5624381", "0.56226003", "0.56129193", "0.5608246", "0.55882025", "0.5583245", "0.55824345", "0.5578064", "0.5577199", "0.55747426", "0.55712044", "0.55575585", "0.55500025", "0.5549561", "0.55480504", "0.55465287", "0.5534835", "0.5532562", "0.55306304", "0.5517971", "0.55165815", "0.55107313", "0.5507788", "0.5504864", "0.55045176", "0.5500046", "0.5497609", "0.5493939", "0.5493054", "0.5489692", "0.54893225", "0.5486331", "0.5486219", "0.54861754", "0.54842144", "0.5482547", "0.54811805", "0.5479472", "0.5476514" ]
0.6579574
5
Returns the percent of noise elements in the dataset.
def analysis_function_noise_level(self, clustering, total_elements):
    return 100.-(clustering.total_number_of_elements/float(total_elements))*100.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)", "def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val,title=\"Pixel values considered as noise\")\n return np.var(img[img<last_val])", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size", "def noise_level(data):\n length=len(data) - 2\n dev=[]\n for i in range(1,length - 1):\n dev.append((abs(data[i] - data[i-1]) + abs(data[i] - data[i + 1]))/2)\n dev.sort()\n return dev[round(0.9*length)]", "def noise(self, freq: int, /) -> None:", "def mdape(self) -> float:\n return float(np.median(np.abs(self._percentage_error())) * 100)", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def var(x):\n length = len(x)\n\n if length == 0:\n return None\n result = 0.0\n m = TinyStatistician.mean(x)\n for i in x:\n result += (i - m) ** 2\n\n return result / length", "def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)", "def get_percent_wet():\n # Create an ADS1115 ADC (16-bit) instance.\n adc = Adafruit_ADS1x15.ADS1115()\n\n GAIN = 1\n DRY = 20280 # 100% Dry\n WET = 10140 # 100% Wet\n\n value = adc.read_adc(0, gain=GAIN)\n \n # print \"value: %d\" % value\n \n percent_dry = ((value - WET)*100)/(DRY-WET)\n percent_wet = 100 - percent_dry\n\n return percent_wet", "def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num/den*100, 2)", "def noise(self):\n return self._noise", "def sampling_ratio(self):\n return self.coincidences / self.n", "def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)", "def noiseFraction(truth_i3, measured_i3, tolerance):\n if (measured_i3.getNumberMolecules() == 0):\n return [0, truth_i3.getNumberMolecules()]\n \n noise_locs = 0\n total_locs = 0\n for i in range(truth_i3.getNumberFrames()):\n t_locs = truth_i3.getMoleculesInFrame(i+1)\n m_locs = measured_i3.getMoleculesInFrame(i+1, good_only = False)\n \n dist = utilC.peakToPeakDist(m_locs['xc'], m_locs['yc'], t_locs['xc'], t_locs['yc'])\n\n 
noise_locs += numpy.count_nonzero((dist > tolerance))\n total_locs += dist.size\n\n return [noise_locs, total_locs]", "def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)", "def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def get_estimated_noise(self):\n return self.gp_core.noise_var", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)", "def percentages(self) -> pandas.Series:\n if self._percentages is None:\n scalar = 1 if self.use_fraction else 100\n self._percentages = scalar * self.counts/self.total\n return self._percentages", "def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def prob(self, w):\n return self.counts[w] / self.total_count", "def get_noise_distribution(corpus: List[str],\n vocabulary: np.ndarray,\n dist_alpha: float\n ) -> List[int]:\n all_words = [word for text in corpus for word in text]\n arr = np.array(list(map(\n lambda x: all_words.count(x)**dist_alpha, vocabulary\n )))\n return arr/arr.sum() # frequencies, normalised, in order of vocabulary", "def mean(data):\n n = len(data)\n return sum(data)/float(n)", "def estimate_noiseperbl(data):\n\n # define noise per baseline for data seen by detect_bispectra or image\n datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal\n noiseperbl = datamean.std() # measure single noise for input to detect_bispectra\n logger.debug('Measured noise per baseline of {0:.3f}'.format(noiseperbl))\n return noiseperbl", "def nats(self) -> float:\n return self.entropy()", "def theoretical_effective(dataset):\n return float(sum(dataset))/len(dataset)", "def dominance(counts):\n freqs = counts/float(counts.sum())\n return (freqs*freqs).sum()", "def mean(data):\n n = len(data)\n if n < 1:\n return 0\n return sum(data)/float(n)", "def get_percent_interest(self):\n return self.__percentage_interest", "def density(self):\n return self.nnz / self.size", "def pct(self):\n\t\treturn self.bottle.pct()", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def 
get_estimated_noise(self):\n return self.gp_core.likelihood.noise.item()", "def probability(self, samples):\n pass", "def _ion_densities(self):\n nD = self.ne_in*(6-self.zeff_in)/(5.)\n nC = self.ne_in*(self.zeff_in-1)/(30.)\n nC[np.where(nC<0)]=0.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC", "def get_probability_loss(self):\n return sum(self._loss)/len(self._loss)", "def variation_statistic(gene_data: pd.DataFrame) -> pd.Series:\n statistic = gene_data.std(axis=1) / gene_data.mean(axis=1)\n # statistic = gene_data.std(axis=1)\n # TODO How to deal with 0 expressed genes? Are they informative?????\n return statistic.replace(np.nan, 0)", "def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio", "def addNoise_amp(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.copy(array)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = np.square(normalise(array))\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n arrayout = np.sqrt(arrayout)\r\n tot = np.sum(np.abs(array)**2)\r\n arrayout = normalise(arrayout,tot)\r\n return arrayout", "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def find_scales_dominant(wav, no_good, dataset=None):\n dataset_dic = {\n\n 'MFG' : -17, # purely empirical, sorry\n 'MSG' : -8,\n 'GRIDSAT' : -20,\n 'neutral' : 0\n }\n\n wll = wav['t']\n\n power_img = np.sum(wll, axis=0)\n power_img[no_good] = 0\n\n\n smaller = dataset_dic[dataset]\n thresh_p = np.sum((wav['scales'] + smaller) ** .5) # set different power threshold adjustments to datasets\n try:\n power_img[(power_img < np.percentile(power_img[power_img > 1], 25)) | (power_img < (thresh_p))] = 0\n except IndexError:\n return\n\n labels, numL = label(power_img)\n u, inv = np.unique(labels, return_inverse=True)\n\n for inds in u:\n if inds == 0:\n continue\n\n arr = power_img.copy()\n arr[np.where(labels != inds)] = 0\n power_img.flat[np.argmax(arr)] = -999\n\n return power_img", "def qc_NoiseRank(spread):\n\n variance = spread*spread # for a Gaussian\n \n if (variance <= 0.2):\n qc_label = 'good'\n elif (0.2 < variance < 0.25):\n qc_label = 'ok'\n else:\n qc_label = 'bad'\n \n return qc_label", "def calculate_probability(self):\n return 0", "def density(self):\n return self.nnz/self.dim", "def get_noise(self):\n\n n = self.qubic.get_noise().ravel()\n n = np.r_[n, self.planck.get_noise().ravel()]\n\n return n", "def trimean(data):\n p_25, p_50, p_75 = percentile(data, [25, 50, 75], axis=0)\n\n return (p_25 + 2 * p_50 + p_75) / 4", "def samplePercNaN(df, specie = \"Caenorhabditis elegans OX=6239\", sample = \"S01\"):\n spec = rawSpecies(df = df, specie = specie)\n nonDecoy = spec[spec[\"EG.IsDecoy\"] == False] \n sample = sample\n sampleDat = nonDecoy[nonDecoy[\"R.Condition\"].str[-3:] == sample] \n perc_NaN = sampleDat[\"PG.Quantity\"].isna().sum() / (sampleDat[\"PG.Quantity\"].count() + sampleDat[\"PG.Quantity\"].isna().sum())\n return perc_NaN", "def addNoise(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else 
:\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.zeros(array.shape, dtype=arrayout.dtype)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n elif np.float64(counts) > 1.0e9 :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.normal(arrayout*np.float64(counts),np.sqrt(arrayout*np.float64(counts)))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n return arrayout", "def number_densities(self) -> u.m**-3:\n try:\n return (self.n_elem * self.ionic_fractions).to(u.m**-3)\n except Exception: # noqa: BLE001\n return np.full(self.atomic_number + 1, np.nan) * u.m**-3", "def noiseFraction(truth_h5, measured_h5, tolerance):\n if (measured_h5.getNLocalizations() == 0):\n return [0, truth_h5.getNLocalizations()]\n \n noise_locs = 0\n total_locs = 0\n for i in range(truth_h5.getMovieLength()):\n t_locs = truth_h5.getLocalizationsInFrame(i)\n m_locs = measured_h5.getLocalizationsInFrame(i)\n\n if bool(t_locs) and bool(m_locs):\n dist = iaUtilsC.peakToPeakDistAndIndex(m_locs['x'], m_locs['y'],\n t_locs['x'], t_locs['y'],\n max_distance = tolerance)[0]\n\n noise_locs += numpy.count_nonzero((dist < 0.0))\n total_locs += dist.size\n elif bool(m_locs):\n noise_locs += m_locs['x'].size\n total_locs += m_locs['x'].size\n\n return [noise_locs, total_locs]", "def noiseReduction(self):\n pass", "def stdProbabilityNorm(self):\n return 0.5", "def entropy(self):\r\n return 1/2 * (self.dim * (_LOG_2PI + 1) + self._log_det_cov)", "def sample_percent(self, percentage):\n count = int(len(self.features) * (percentage / 100))\n indices = np.random.randint(0, high=len(self.features), size=count)\n return ProcessedImageData(self.features[indices], self.labels[indices], indices)", "def pc_nproduced_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_nproduced_avg(self)", "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def white_noise():\n return random.randint(-32767, 32767)", "def chance_of_rain(self):\r\n raise NotImplementedError", "def menhinick(counts):\n return observed_species(counts)/sqrt(counts.sum())", "def get_estimated_noise(self):\n raise NotImplementedError('Abstract Method.')", "def score(self, data):\n return np.mean( np.log( mvn.getSamplePointDensity(self.dataFrame_, self.H_, pd.DataFrame(data)) ) )", "def nanPercentage(df):\n rows = df.shape[0]\n cols = df.shape[1]\n tot = rows*cols\n \n nanNum = 0\n for i in range(df.shape[0]):\n nanNum = nanNum + np.sum ( pd.isnull(df.iloc[i]) )\n logger.debug ('nan %d tot %d ' % (nanNum, tot) )\n perc = (100*nanNum) / (tot * 1.0)\n return perc", "def percent(self):\r\n return self._percent", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def tail_ratio(returns):\n\n return np.abs(np.percentile(returns, 95)) / \\\n np.abs(np.percentile(returns, 5))", "def die(world, percentage):\n\n \n infected = np.sum((world >= 1) & (world <= 10))\n to_die = percentage * infected\n if to_die < 1:\n to_die = 0\n else:\n to_die = to_die\n to_die = np.round(to_die).astype(int)\n\n\n indizes = [] # Für die Koordinaten der infizierten Zellen\n for i, v in np.ndenumerate(world):\n if v in 
range(1, 11):\n indizes.append(i)\n #Ziehe Stichprobe aus den infizierten Zellen und setze sie auf 300\n sample = random.sample(indizes, to_die)\n for i in sample:\n world[i] = 300\n \n return world", "def mape(self) -> float:\n return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)", "def population_density(self) -> float:\n return self.population / self.area", "def get_percent(self):\n return self.percent", "def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)", "def get_fraction_significant_p_value_gray_screen(group):\n fraction_significant_p_value_gray_screen = len(group[group.p_value_gray_screen < 0.05]) / float(len(group))\n return pd.Series({'fraction_significant_p_value_gray_screen': fraction_significant_p_value_gray_screen})", "def main():\n x, y = create_simple_classification_dataset(5)\n count = 0\n for i in y:\n if i[0] == 0:\n count += 1\n print('percent of zeros', count / len(y)) # confirm that the distribution isn't too skewed.", "def noise_profile(nf, gain, ffs, df):\n\n h_mWThz = 1e-3 * h * (1e14)**2\n nf_lin = db2lin(nf)\n g_lin = db2lin(gain)\n ase = h_mWThz * df * ffs * (nf_lin * g_lin - 1)\n asedb = lin2db(ase)\n\n return asedb", "def entropy(self):\n n = len(self.y)\n sum_ = 0\n for i in np.unique(self.y):\n v = len(self.y[self.y == i])\n sum_ += -((v/n) * log2(v/n))\n return sum_", "def copyCounterWithNoise(self, counter, prob):\n \"\"\"\n counter = deepcopy(counter)\n for i in range(self.nObjectTypes):\n noise = np.random.randint(1,5)\n if np.random.random() < prob:\n if np.random.random() > 0.5:\n counter[i] += noise # false detections\n else:\n counter[i] -= noise # missed detections\n if counter[i] < 0:\n counter[i]=0\n return counter\n \"\"\"\n counter = deepcopy(counter)\n for i in range(self.nObjectTypes):\n noise = np.random.randint(1,5)\n if np.random.random() > prob:\n continue\n if counter[i] == 0:\n # Noise could only be +1\n counter[i] = noise\n else:\n # Noise could be either +1 or -1\n if np.random.random() > 0.5:\n counter[i] += noise\n else:\n counter[i] -= noise\n if counter[i] < 0:\n counter[i]=0\n return counter", "def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local", "def normpdf(x, mean, sd):\n var = float(sd)**2\n denom = (2*math.pi*var)**.5\n num = math.exp(-(float(x)-float(mean))**2/(2*var))\n return num/denom", "def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)", "def fracSeen(data):\n itemSet = set()\n numSeen = []\n for x in data:\n itemSet.add(x)\n numSeen.append(len(itemSet))\n\n fracSeen = [x/numSeen[-1] for x in numSeen]\n return fracSeen", "def get_mean(data, n=-1):\n \n return round((sum(data)/n),1)", "def entropy(data):\n strings, lens = Counter(data), np.float(len(data))\n return -sum(count / lens * np.log2(count / lens) for count in strings.values())", "def pulse_width_percent(self) -> float:", "def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count", "def __measurement_prob(angle, measurement, noise):\n return ParticleFilter.Gaussian(angle, noise, measurement)", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def 
percentage(my_list, item):\n return 100.0 * frequency(my_list, item)", "def intensity(self) -> int:", "def pc_noutput_items_avg(self) -> \"float\":\n return _beamforming_swig.randomsampler_sptr_pc_noutput_items_avg(self)", "def mean_deviation(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: abs(x - _mean), self.sample))/len(self.sample)", "def salt_and_pepper_noise(image, prob):\n output = np.zeros(image.shape,np.uint8)\n thres = 1 - prob \n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n output[i][j] = 0\n elif rdn > thres:\n output[i][j] = 255\n else:\n output[i][j] = image[i][j]\n return output", "def sampling_frequency(self) -> int:\n return int(1 / self.x_scale)" ]
[ "0.68183047", "0.67847115", "0.6347133", "0.62229025", "0.61716366", "0.61085075", "0.6024723", "0.59983605", "0.59494674", "0.5946805", "0.5945206", "0.5942025", "0.5883616", "0.58745587", "0.58745587", "0.5860042", "0.5856353", "0.5841199", "0.5834648", "0.57891965", "0.5787481", "0.5779676", "0.5754733", "0.5739616", "0.5738536", "0.5734329", "0.5733115", "0.57128024", "0.5706441", "0.5677128", "0.56746167", "0.56584126", "0.565517", "0.5646461", "0.56399137", "0.56369793", "0.56289685", "0.56271696", "0.5621887", "0.5619539", "0.5602273", "0.5588275", "0.558633", "0.55799896", "0.5562771", "0.55593", "0.55535597", "0.55517733", "0.55502284", "0.5539951", "0.553784", "0.55372566", "0.5527414", "0.5524691", "0.55224955", "0.55221075", "0.55201876", "0.55174595", "0.55141395", "0.551336", "0.55103344", "0.5485136", "0.5482127", "0.5481422", "0.5480001", "0.54757494", "0.5472683", "0.5470905", "0.5467766", "0.5466083", "0.5464174", "0.5460244", "0.5458011", "0.5451941", "0.5450008", "0.5448966", "0.5446524", "0.5443406", "0.5435541", "0.5430954", "0.54283017", "0.5427817", "0.5424463", "0.54225093", "0.5421148", "0.541611", "0.5413816", "0.5412646", "0.5412563", "0.54112935", "0.54090303", "0.5405418", "0.54045916", "0.5403542", "0.5402787", "0.53958315", "0.53947634", "0.53865856", "0.53851086", "0.53808147" ]
0.70178664
0
Returns the mean cluster size.
def analysis_function_mean_cluster_size(self, clustering):
    sizes = get_cluster_sizes(clustering.clusters)[1]
    return numpy.mean(sizes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def meanContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['meanContig']", "def avgsize_c(self):\n return self._avgsize_c", "def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def mean(self):\n mean = sum(self.data)/self.size\n return mean", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)", "def meshsize_avg(self):\n nspans = self.numspans\n support = abs(self.kv[-1] - self.kv[0])\n return support / nspans", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def average_size(self):\n sizes = []\n for i in range(self.params.num_trees):\n with tf.device(self.device_assigner.get_device(i)):\n sizes.append(self.trees[i].size())\n return tf.reduce_mean(tf.pack(sizes))", "def n_clusters(self):\n return len(self.clusters)", "def graph_data_size_avg(self) -> float:\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)", "def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()", "def avg_net(self) -> float:\n return torch.mean(self.units.net)", "def average_city_size(self):\r\n average = 0\r\n total = 0\r\n for code, node in self.vertices.items():\r\n average += node.population\r\n total += 1\r\n return average // total", "def ensemble_mean(self):\n return self.mean(dim='mem')", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def n_clusters(self):\n return self.model.n_clusters", "def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None", "def get_cluster_average(cls, indices, dist_mat):\n distances = cls.get_all_distances(indices, dist_mat)\n return np.mean(distances)", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0", "def extract_cluster_size(line):\r\n cluster_size = line.split(\":\")[-1]\r\n\r\n try:\r\n cluster_size = int(cluster_size)\r\n except ValueError:\r\n return 0\r\n return cluster_size", "def get_clust_cent(self):\r\n\r\n return self.__clust_cent", "def cf_mean(self):\n return self['capacity_factor'] / 100", "def cf_mean(self):\n return self['capacity_factor'] / 100", 
"def mean(self):\n return self._mean", "def mean(self):\n return self._mean", "def get_total(self):\n return int(self.total_cores)", "def get_total_n_cpu(self) -> int:", "def mean(self) -> float:\n return self._data.mean()", "def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def meanZmArea(self):\n sumArea = 0\n for site in self.sites:\n sumArea = sumArea + site.siteZmArea\n meanArea = sumArea / self.countSites()\n return meanArea", "def get_mean(self):\n return self.serie.mean()", "def avg_hops(self):\n return self._avg_hops", "def kmean(X,initial_centroids,max_iters):\n m = np.size(X,0)\n K = np.size(initial_centroids,0)\n centroids = initial_centroids\n idx = np.zeros((m,1))\n for i in range(1,max_iters):\n idx = nearest_cluster(X,centroids)\n centroids = update_centroids(X,idx,K)\n return centroids,idx", "def mean(self) -> float:\n return self._interval_sum / len(self.intervals)", "def get_avg_word_length(self):\n words = self.blob.words\n average_word_length = np.mean(np.array([len(word) for word in words]))\n return average_word_length", "def compute_centroid(data):\n return sum(data[:]) / len(data)", "def mean_value(self):\n\n return self._system.mean()", "def k_mean_cluster(num_anchors, boxes, convergence_threshold=1e-9):\n # Randomly pick 5 boxes as centroids\n centroids = np.array(random.sample(boxes, k=num_anchors))\n # Clustering until reaching loss_convergence threshold\n centroids, prev_errors = k_mean(boxes, centroids)\n while True:\n centroids, errors = k_mean(boxes, centroids)\n if abs(errors - prev_errors) <= convergence_threshold:\n break\n else:\n prev_errors = errors\n\n avg_iou = np.mean(np.max(compute_iou(boxes, centroids), axis=-1))\n return centroids, avg_iou", "def meanMolarMass(self):\n return _cantera.phase_meanmolwt(self._phase_id)", "def get_average_neighbors(self,radius):\n return np.mean([agent.n_neighbors(radius) for agent in self.agents])", "def mean_radius(self):\n return self._mean_radius", "def avg_packet_size(self):\n result = 0\n try:\n result = sum(self.fcip_doc['packet_lengths'])/float(len(self.fcip_doc['packet_lengths']))\n except:\n pass\n return result", "def getMean(self):\n return self.mean", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def average_length_of_gene(self):\n return sum([len(e) for e in self.population]) / len(self.population)", "def get_mean_degree(self):\n\n return np.mean(self.graph.degree())", "def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())", "def calculate_cluster_fitness(self, cluster_id: ObjectId):\n\n genomes = self.genome_repository.get_genomes_in_cluster(cluster_id)\n\n cluster_fitness = 0\n\n for genome in genomes:\n cluster_fitness += genome.fitness\n if cluster_fitness == 0:\n return 0\n\n return cluster_fitness / len(list(genomes))", "def mean(self):\n return self.sum / self.sum_weights", "def 
total_min_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_min_node_count\")", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def size(self) -> int:\n\n return self.sizes.sum()", "def mean(self):\n return self.vmean", "def mean_sample_length(dataset, n_points, n_neighbors, n_resamplings):\r\n\tlength = 0\r\n\tfor _ in range(n_resamplings):\r\n\t\tsample = random.sample(range(len(dataset)), n_points)\r\n\t\tsample_kNN = kneighbors_graph(dataset[sample], 5, mode='distance', include_self=False, n_jobs=-1)\r\n\t\tlength += np.sum(sample_kNN)\r\n\treturn length/n_resamplings", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)", "def centroid(self):\n return self.contours_to_matrix().mean(axis=0)", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def mean(self):\n\n return self._reduce_for_stat_function(F.mean, only_numeric=True)", "def average_length(corpus):\n token_size = 0\n for i in corpus:\n token_size += len(i)\n return token_size/len(corpus)", "def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)", "def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid", "def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i", "def nrmse_mean(self) -> float:\n return float(self.rmse() / np.mean(self.true))", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)", "def find_average_doc_length(self):\n avg_doc_length_result = self.es.search(index=\"ap_dataset\", doc_type=\"document\",\n body={\"aggs\": {\"avg_doc_length\": {\"avg\": {\"script\": \"doc['doclength'].values\"}}}})\n avg_doc_length = avg_doc_length_result['aggregations']['avg_doc_length']['value']\n return avg_doc_length", "def assign_to_current_mean(img: np.ndarray, clustermask: np.ndarray) -> float:\n\n rows, cols = img.shape[:2]\n distances = np.zeros((numclusters, 1))\n overall_dist = 0\n\n for i in range(rows):\n for j in range(cols):\n distances = distance(img[i, j, :]) # returned shape: (numclusters, 1)\n \n k = np.argmin(distances) # closest cluster\n clustermask.itemset((i, 
j), k) # update cluster mask\n overall_dist += distances[k, 0] # sum distance\n\n return overall_dist", "def get_avg_sentence_length(self):\n sentences = self.blob.sentences\n average_sentence_length = np.mean(np.array([len(sentence.words) for sentence in sentences]))\n return average_sentence_length", "def get_average(self) -> float:\n return sum(self._scores) / len(self._scores)", "def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def size(self):\n futures = self.client.map(_call_size, self.vecDask, pure=False)\n sizes = self.client.gather(futures)\n return np.sum(sizes)", "def compute_cluster_ensemble(var, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n num_clusters = maxIndices.shape[0]\n if len(var.shape) == 1:\n meanvar = np.zeros((num_clusters,))\n elif len(var.shape) == 2:\n meanvar = np.zeros((var.shape[0],num_clusters))\n else:\n warnings.warn('did not have correct shape for ' + str(var) + ' with len(var.shape)='+ str(len(var.shape)))\n meanvar = None\n\n for aCluster, maxInd in enumerate(maxIndices):\n # get particles in cluster\n particles = indicesToParticle[indicesOnCluster[aCluster,0:maxInd]]\n\n # compute mean depending upon size of array\n if len(var.shape) == 1:\n meanvar[aCluster] = np.mean(var[particles])\n if len(var.shape) == 2:\n meanvar[:,aCluster] = np.mean(var[:,particles], axis=1)\n\n return meanvar #}}}", "def mean(self):\n return self._summarize(lambda c: c.mean)", "def group_size(self):\n return self._gsize", "def get_sectors_per_cluster(self):\n\n\t\tsectors_per_cluster_base = struct.unpack('B', self.boot_sector_data[13 : 14])[0]\n\t\tif sectors_per_cluster_base == 0:\n\t\t\traise BootSectorException('Invalid cluster size (zero)')\n\n\t\tif sectors_per_cluster_base <= 0x80: # Although 0x80 is a signed value, it's used as an unsigned one.\n\t\t\tsectors_per_cluster_real = sectors_per_cluster_base\n\t\telse:\n\t\t\tsectors_per_cluster_base = struct.unpack('b', self.boot_sector_data[13 : 14])[0] # Read this again as a signed value.\n\t\t\tsectors_per_cluster_real = 1 << abs(sectors_per_cluster_base)\n\n\t\treturn sectors_per_cluster_real", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def mean_min_distance_to_cluster(actual_activation,\n cluster_activation, n_cluster_samples):\n if cluster_activation.ndim == 4:\n actual_activation = actual_activation.dimshuffle('x',0,1,2)\n squared_distances = T.mean(T.square(cluster_activation -\n actual_activation), axis=(1,2,3))\n elif cluster_activation.ndim == 2:\n actual_activation = actual_activation.dimshuffle('x',0)\n squared_distances = T.mean(T.square(cluster_activation -\n actual_activation), axis=(1,))\n \n \n squared_distances_sorted = T.sort(squared_distances)\n \n distance = T.mean(squared_distances_sorted[:n_cluster_samples])\n return distance", "def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")", "def getCentroid(self):\r\n return self._centroid", "def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / 
float(self._cur_elem_count)\n self._mtx.release()\n return avg", "def mean(self):\r\n return np.mean(self.data_array)", "def avg_latency(self):\n return self._avg_latency", "def avg_latency(self):\n return self._avg_latency", "def find_avg(centroids, short_cut=False, sim_scores=None):\n \n total_sim = 0.0\n total_comparisons = 0\n \n if short_cut:\n total_comparisons = len(sim_scores)\n \n for score in sim_scores:\n total_sim += score\n \n return (total_sim / total_comparisons)\n\n length = len(centroids)\n\n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n total_sim += similarity(centroids[i], centroids[j])\n total_comparisons += 1\n\n return (total_sim / total_comparisons)", "def get_insert_size_mean(self, ctx, params):\n # ctx is the context object\n # return variables are: returnVal\n #BEGIN get_insert_size_mean\n\n if 'workspace_name' not in params:\n raise ValueError('Parameter workspace_name is not set in input arguments')\n workspace_name = params['workspace_name']\n if 'id' not in params:\n raise ValueError('Parameter id is not set in input arguments')\n objid = params['id']\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n try:\n\n objref = workspace_name + '/' + str(objid)\n\n # Note that results from the workspace are returned in a list\n returnVal = wsClient.get_objects([{'ref': objref}])[0]\n\n print \"get_insert_size_mean returnVal \" + str(returnVal)\n\n if returnVal is not None:\n if returnVal['data']['single_genome'] is not None:\n returnVal = returnVal['data']['single_genome']\n\n # print \"is_single_genome issingle \" + str(returnVal)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n orig_error = ''.join(' ' + line for line in lines)\n raise ValueError('Error from workspace:\\n' + orig_error)\n\n #END get_insert_size_mean\n\n # At some point might do deeper type checking...\n if not isinstance(returnVal, float):\n raise ValueError('Method get_insert_size_mean return value ' +\n 'returnVal is not type float as required.')\n # return the results\n return [returnVal]", "def get_total_distributed(self) -> int:\n return self._total_distributed.get()", "def centroid(clusters):\n centroids = list(map(\n lambda cluster: tuple(map(\n lambda x, cluster=cluster: x / len(cluster['vertices']),\n sum(map(\n numpy.array,\n cluster['vertices'])))),\n clusters))\n return centroids", "def mem_per_core(self):\n return self.mem_per_node / self.cores_per_node", "def num_cores(self):\n return self.cores_per_socket * self.sockets_per_node * self.num_nodes", "def __len__(self):\n return len(self.centroid_vector)", "def get_num_nodes(self):\n\n return sum(self.topology)", "def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot", "def compute(self): \r\n hist = self.confusion_matrix\r\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\r\n mean_iu = np.nanmean(iu)\r\n return mean_iu" ]
[ "0.7243587", "0.7179832", "0.7015998", "0.7009933", "0.68922645", "0.68876517", "0.684724", "0.6808377", "0.669405", "0.6671428", "0.6667033", "0.6651806", "0.65882844", "0.6482646", "0.63869536", "0.63827366", "0.63742137", "0.63045746", "0.6303817", "0.62123394", "0.61937535", "0.61916125", "0.6186689", "0.6182542", "0.618072", "0.61401653", "0.6137394", "0.6104162", "0.6104162", "0.6092552", "0.6092552", "0.60821295", "0.6077454", "0.6044927", "0.60269886", "0.5993478", "0.5986319", "0.59649414", "0.59624803", "0.595281", "0.59385246", "0.5932969", "0.5908111", "0.5876624", "0.58603084", "0.58601516", "0.585777", "0.5856976", "0.5856251", "0.5845542", "0.5830276", "0.5815427", "0.58153605", "0.58131456", "0.58100456", "0.5805519", "0.58031666", "0.5798056", "0.57971174", "0.57949406", "0.57882005", "0.57876503", "0.57706314", "0.5770221", "0.57467306", "0.57463014", "0.57405955", "0.5722507", "0.57032484", "0.5681643", "0.5676549", "0.5669544", "0.5669448", "0.56686467", "0.5667061", "0.5665254", "0.5658459", "0.56547993", "0.5652273", "0.56429666", "0.563285", "0.56299233", "0.56297314", "0.5616537", "0.5610085", "0.56086594", "0.56074643", "0.5606431", "0.5601822", "0.5601822", "0.5588715", "0.5588563", "0.55882394", "0.558775", "0.55836505", "0.5571456", "0.5569384", "0.5564178", "0.5563764", "0.556266" ]
0.8215371
0
Creates a calculator using a class.
def evaluate_with_calculator(self, clustering, key_args): calculator = key_args['class']() return calculator.evaluate(clustering, key_args['matrix'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n model = Calculator()", "def __init__(self, mode=\"Normal\"):\n\n self.mode = mode\n # minor optimization\n # apparently [] is 5x faster than list()\n # https://youtu.be/YjHsOrOOSuI?t=19m30s\n self.button = []\n \"\"\"button: List of buttons.\"\"\"\n\n self.numbers = (\"789456123\")\n \"\"\"numbers: basic numberpad layout of ints\"\"\"\n\n self.root = tk.Tk()\n self.root.title('Michael\\'s Calculator')\n\n self.menu = self.root.winfo_toplevel()\n self.root.menuBar = tk.Menu(self.menu)\n self.menu['menu'] = self.root.menuBar\n\n self.root.subMenu = tk.Menu(self.root.menuBar)\n self.root.menuBar.add_cascade(label='View', menu=self.root.subMenu)\n self.root.subMenu.add_command(label='Basic')\n self.root.subMenu.add_command(label='Scientific')\n\n self.display = tk.Entry(self.root, width=15)\n self.display.grid(row=0, column=0, columnspan=5)\n self.create_number_buttons()\n self.create_operation_buttons()\n\n self.root.mainloop()", "def calculator(**pars):\n # paying for parameter conversion each time to keep life simple, if not fast\n pars = revert_pars(model_info, pars)\n for k, v in pars.items():\n parts = k.split('.') # polydispersity components\n if len(parts) == 2:\n model.dispersion[parts[0]][parts[1]] = v\n else:\n model.setParam(k, v)\n return theory()", "def create(cls, **dictionary):\n if cls.__name__ == 'Square':\n object = cls(1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == 'Rectangle':\n object = cls(1, 2)\n object.update(**dictionary)\n return object", "def calculator(operation): \n \n operation = MATH[operation]\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n total = operation(a, b)\n\n return f\"<h1>TOTAL: {total}</h1>\"", "def make_fee_class():\r\n fee = 1\r\n return make_class(locals())", "def withMaker(cls):\n nodeName = cls.__name__.decode(\"utf-8\")\n if cls.__init__ is object.__init__:\n names = ()\n else:\n names = inspect.getargspec(cls.__init__).args[1:]\n verb = nodeName\n if getattr(cls, \"fromMonte\", None) is not None:\n verb += \".fromMonte\"\n arglist = u\", \".join([u\"args[%s]\" % i for i in range(len(names))])\n runverb = 'RUN_%s' % len(names)\n src = \"\"\"\\\n @audited.DF\n class %sMaker(Object):\n def printOn(self, out):\n out.call(u\"print\", [StrObject(u\"<kernel make%s>\")])\n def recv(self, atom, args):\n if atom is %s:\n return %s(%s)\n raise Refused(self, atom, args)\n \"\"\" % (nodeName, nodeName, runverb, verb, arglist)\n d = globals()\n exec textwrap.dedent(src) in d\n cls.nodeMaker = d[nodeName + \"Maker\"]()\n return cls", "def __init__(self):\n\n #Creates the window.\n EasyFrame.__init__(self, \"Calculator\", resizable = False)\n\n #Creates the data model Calculator.\n self.calculator = Calculator()\n\n #Keeps track of when the user enters an input.\n self.operatorEntered = False\n\n #Creates the colored panels.\n for column in range(5):\n numberBarPanel = self.addPanel(row = 0, column = column, background = \"black\")\n\n for row in range(1, 6):\n symbolPanel = self.addPanel(row = row, column = 4, background = 'orange')\n\n for column in range (4):\n topPanel = self.addPanel(row = 1, column = column, background = \"snow3\")\n\n for row in range (2, 6):\n for column in range (0, 3):\n numberPanel = self.addPanel(row = row, column = column, background = \"snow2\")\n\n #Creates fonts.\n barFont = Font(family = \"San Francisco\", size = 20)\n \n #Creates the number bar at the top of the calculator.\n self.digits = self.addLabel(\"0\", row= 0, column= 0, columnspan= 5, sticky= \"E\", 
background = 'black', foreground = 'white', font = barFont)\n\n #Creates button for clear.\n self.clearButton= self.addButton(text= \"AC\", row= 1, column= 0, command= self.clearCommand)\n self.clearButton['background'] = 'snow3'\n \n #Creates button for +/-.\n negativeButton= self.addButton(text= \"+/-\", row= 1, column= 1, command= self.negativeCommand)\n negativeButton['background'] = 'snow3'\n \n #Creates button for %\n percentButton= self.addButton(text= \"%\", row= 1, column= 2, command= self.percentCommand)\n percentButton['background'] = 'snow3'\n\n #Creates side row of operator symbols.\n sideSymbols= [\"/\", \"X\", \"-\", \"+\", \"=\"]\n row= 1\n for symbol in sideSymbols:\n symbolButton = self.addButton(text= symbol, row= row, column= 4)\n symbolButton[\"command\"] = self.operatorCommand(symbol)\n symbolButton['foreground'] = 'white'\n symbolButton['background'] = 'orange'\n row += 1\n \n #Goes through and creates a grid with numbers 1-9.\n digit= 7\n for row in range(2, 5):\n for column in range(3):\n numberButton = self.addButton(str(digit), row, column)\n numberButton[\"command\"] = self.numberCommand(str(digit))\n numberButton['background'] = 'snow2'\n if digit == 9:\n digit = 3\n elif digit == 6:\n digit = 0\n digit += 1\n\n #Creates 0 button.\n zeroButton = self.addButton(text= \"0 \", row= 5, column = 0, columnspan = 2, command = self.numberCommand(\"0\"))\n zeroButton['background'] = 'snow2'\n\n #Creates . button.\n self.decimalButton = self.addButton(text= \".\", row = 5, column = 2, command = self.decimalCommand, state = 'normal')\n self.decimalButton['background'] = 'snow2'", "def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand", "def test_module(self):\n calculator = Calculator(Adder(), Subtracter(), Multiplier(), Divider())\n\n calculator.enter_number(5)\n calculator.enter_number(2)\n\n # Tests\n # Add:\n self.assertEqual(7, calculator.add())\n\n # Subtract:\n calculator.enter_number(5)\n self.assertEqual(2, calculator.subtract())\n\n # Multiply:\n calculator.enter_number(5)\n self.assertEqual(10, calculator.multiply())\n\n # Divide:\n calculator.enter_number(1)\n self.assertEqual(10, calculator.divide())", "def test_calculator_add():\n # Arrange by instantiating the calc class\n calc = Calculator()\n # Act by calling the method to be tested\n calc.add(4)\n # Assert that the results are correct\n assert calc.result == 4", "def beta_create_Calculator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('calculator.Calculator', 'Add'): CalculateRequest.SerializeToString,\n ('calculator.Calculator', 'Divide'): CalculateRequest.SerializeToString,\n ('calculator.Calculator', 'Multiply'): CalculateRequest.SerializeToString,\n ('calculator.Calculator', 'Subtract'): CalculateRequest.SerializeToString,\n }\n response_deserializers = {\n ('calculator.Calculator', 'Add'): CalculateResponse.FromString,\n ('calculator.Calculator', 'Divide'): CalculateResponse.FromString,\n ('calculator.Calculator', 'Multiply'): CalculateResponse.FromString,\n ('calculator.Calculator', 'Subtract'): CalculateResponse.FromString,\n }\n cardinalities = {\n 'Add': cardinality.Cardinality.UNARY_UNARY,\n 'Divide': cardinality.Cardinality.UNARY_UNARY,\n 'Multiply': cardinality.Cardinality.UNARY_UNARY,\n 'Subtract': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, 
response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'calculator.Calculator', cardinalities, options=stub_options)", "def class_test():\n\n class Zulu(object):\n x = 6\n y = 6\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n @classmethod\n def add(cls):\n print(cls.x + cls.y)\n\n kowabunga = Zulu(5, 5)\n\n kowabunga.add()", "def __init__(self):\r\n EasyFrame.__init__(self, title=\"Investment Calculator\")\r\n self.addLabel(text=\"Initial amount\", row=0, column=0)\r\n self.addLabel(text=\"Number of years\", row=1, column=0)\r\n self.addLabel(text=\"Interest rate in %\", row=2, column=0)\r\n self.amount = self.addFloatField(value=0.0, row=0, column=1)\r\n self.period = self.addIntegerField(value=0, row=1, column=1)\r\n self.rate = self.addIntegerField(value=0, row=2, column=1)\r\n\r\n self.outputArea = self.addTextArea(\"\", row=4, column=0, columnspan=2, width=50, height=15)\r\n\r\n self.compute = self.addButton(text=\"Compute\", row=3, column=0, columnspan=2, command=self.compute)", "def __init__(self):\n Cmd.__init__(self)\n self.calc = ReversePolishCalc()", "def makeCalc(self, dataSet):\n\n #cyl = sasmodels.core.load_model_info('cylinder')\n #hs = sasmodels.core.load_model_info('hardsphere')\n #cylhs = sasmodels.core.load_model_info('cylinder@hardsphere')\n cylhmsa = sasmodels.core.load_model_info('cylinder@hayter_msa')\n\n # Build using c version instead of python. Avoids pyopencl\n model = sasmodels.core.build_model(cylhmsa, platform='dll')\n self.calculator = sasmodels.direct_model.DirectModel(dataSet, model)\n\n return", "def create(cls, **dictionary):\n new_inst = cls.__new__(cls)\n if cls.__name__ == \"Rectangle\":\n new_inst.__init__(42, 98)\n elif cls.__name__ == \"Square\":\n new_inst.__init__(42)\n new_inst.update(**dictionary)\n return new_inst", "def __init__(self, *args):\n this = _ida_hexrays.new_operand_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 2)\n dummy.update(**dictionary)\n return dummy\n if cls.__name__ == \"Square\":\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def make_operators(self):\n self.relationship_operator = Operators.RelationshipOperator(self)\n self.infection_operator = Operators.InfectionOperator(self)\n self.time_operator = Operators.TimeOperator(self)", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n object = cls(1, 1)\n object.update(**dictionary)\n return object\n\n if cls.__name__ == \"Square\":\n object = cls(1)\n object.update(**dictionary)\n return object", "def create_expr(self, exprcls, ast, params=None, nopush=False):\n if params is None:\n expr = exprcls(self.current_parent, ast=ast)\n else:\n expr = exprcls(self.current_parent, ast=ast, **params)\n if not nopush:\n self.push_state(expr)\n return expr", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n new_class = cls(1, 2)\n else:\n new_class = cls(1)\n new_class.update(**dictionary)\n return 
new_class", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def main():\n #------------------------------------- Functions\n def add(text):\n \"\"\"\n This will add to the display, and be the go to function of most buttons.\n We'll want to add in conditions for what buttons go.\n \"\"\"\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0\n \n def clear():\n dispb[\"text\"] = \"\"\n return 0\n \n def backspace():\n dispb[\"text\"] = dispb[\"text\"][:len(dispb[\"text\"])-1]\n return 0\n \n def equals():\n try:\n dispb[\"text\"] = str(eval(dispb[\"text\"]))\n except:\n dispb[\"text\"]=\"ERROR, clear display\"\n \n #------------------------------------- UI\n \n # title and start\n calc = tk.Tk()\n calc.title(\"Calculator\")\n # size\n calc.geometry(\"255x235\")\n #calc.columnconfigure(range(3), weight=1, minsize=50)\n #calc.rowconfigure(range(1,4), weight=1, minsize=48)\n \n # Icon\n calc.iconbitmap('Icon.ico')#'Icon.ico')\n \n \n calcarea = tk.Frame(master=calc)\n calcarea.pack(padx=5, pady=10)\n \n # display box\n disp = tk.Frame(\n master = calcarea\n )\n disp.grid(row = 0, column = 0, columnspan = 3)\n dispb = tk.Label(\n master = disp,\n text = '',\n fg = 'black',\n bg = 'white',\n borderwidth = 1,\n relief = 'solid',\n height = 2,\n width = 19\n )\n dispb.pack()\n \n # number buttons\n num1 = tk.Frame(\n master=calcarea\n )\n num1.grid(row = 3, column = 0)\n num1b = tk.Button(\n master = num1,\n text = 1,\n width = 5,\n height = 2,\n command = lambda: add(\"1\")\n ).pack()\n # the pack is what adds it to the UI\n # two \n num2 = tk.Frame(\n master=calcarea\n )\n num2.grid(row = 3, column = 1)\n num2b = tk.Button(\n master = num2,\n text = \"2\",\n width = 5,\n height = 2,\n command = lambda: add(\"2\")\n ).pack()\n \n # three \n num3 = tk.Frame(\n master=calcarea\n )\n num3.grid(row = 3, column = 2)\n num3b = tk.Button(\n master = num3,\n text = \"3\",\n width = 5,\n height = 2,\n command = lambda: add(\"3\")\n ).pack()\n \n # four \n num4 = tk.Frame(\n master=calcarea\n )\n num4.grid(row = 2, column = 0)\n num4b = tk.Button(\n master = num4,\n text = \"4\",\n width = 5,\n height = 2,\n command = lambda: add(\"4\")\n ).pack()\n \n # five \n num5 = tk.Frame(\n master=calcarea\n )\n num5.grid(row = 2, column = 1)\n num5b = tk.Button(\n master = num5,\n text = \"5\",\n width = 5,\n height = 2,\n command = lambda: add(\"5\")\n ).pack()\n \n # six \n num6 = tk.Frame(\n master=calcarea\n )\n num6.grid(row = 2, column = 2)\n num6b = tk.Button(\n master = num6,\n text = \"6\",\n width = 5,\n height = 2,\n command = lambda: add(\"6\")\n ).pack()\n \n # seven \n num7 = tk.Frame(\n master=calcarea\n )\n num7.grid(row = 1, column = 0)\n num7b = tk.Button(\n master = num7,\n text = \"7\",\n width = 5,\n height = 2,\n command = lambda: add(\"7\")\n ).pack()\n \n # eight \n num8 = tk.Frame(\n master=calcarea\n )\n num8.grid(row = 1, column = 1)\n num8b = tk.Button(\n master = num8,\n text = \"8\",\n width = 5,\n height = 2,\n command = lambda: add(\"8\")\n ).pack()\n \n # nine \n num9 = tk.Frame(\n master=calcarea\n )\n 
num9.grid(row = 1, column = 2)\n num9b = tk.Button(\n master = num9,\n text = \"9\",\n width = 5,\n height = 2,\n command = lambda: add(\"9\")\n ).pack()\n \n # zero\n num0 = tk.Frame(\n master = calcarea\n )\n num0.grid(row = 4, column = 0)\n num0b = tk.Button(\n master = num0,\n text = 0,\n width = 5,\n height = 2,\n command = lambda: add(\"0\")\n ).pack()\n \n # period\n dot = tk.Frame(\n master = calcarea\n )\n dot.grid(row = 4, column = 1)\n dotb = tk.Button(\n master = dot,\n text = \".\",\n width = 5,\n height = 2,\n command = lambda: add(\".\")\n ).pack()\n \n # equal sign\n eq = tk.Frame(\n master = calcarea\n )\n eq.grid(row = 4, column = 2, columnspan = 2)\n eqb = tk.Button(\n master = eq,\n text = \"=\",\n width = 11,\n height = 2,\n command = equals\n ).pack()\n \n # plus sign\n plus = tk.Frame(\n master = calcarea\n )\n plus.grid(row = 3, column = 4, rowspan = 2)\n plusb = tk.Button(\n master = plus,\n text = \"+\",\n width = 5,\n height = 5,\n command = lambda: add(\"+\")\n ).pack()\n \n # minus sign\n minu = tk.Frame(\n master = calcarea\n )\n minu.grid(row = 3, column = 3)\n minub = tk.Button(\n master = minu,\n text = \"-\",\n width = 5,\n height = 2,\n command = lambda: add(\"-\")\n ).pack()\n \n # multiplication\n mult = tk.Frame(\n master = calcarea\n )\n mult.grid(row = 2, column = 3)\n multb = tk.Button(\n master = mult,\n text = \"*\",\n width = 5,\n height = 2,\n command = lambda: add(\"*\")\n ).pack()\n \n # division\n div = tk.Frame(\n master = calcarea\n )\n div.grid(row = 2, column = 4)\n divb = tk.Button(\n master = div,\n text = \"/\",\n width = 5,\n height = 2,\n command = lambda: add(\"/\")\n ).pack()\n \n # left parentheses\n lefp = tk.Frame(\n master = calcarea\n )\n lefp.grid(row = 1, column = 3)\n lefpb = tk.Button(\n master = lefp,\n text = \"(\",\n width = 5,\n height = 2,\n command = lambda: add(\"(\")\n ).pack()\n \n # right paraentheses\n rigp = tk.Frame(\n master = calcarea\n )\n rigp.grid(row = 1, column = 4)\n rigpb = tk.Button(\n master = rigp,\n text = \")\",\n width = 5,\n height = 2,\n command = lambda: add(\")\")\n ).pack()\n \n # Clear button\n Clr = tk.Frame(\n master = calcarea\n )\n Clr.grid(row = 0, column = 3)\n Clrb = tk.Button(\n master = Clr,\n text = \"C\",\n width = 5,\n height = 2,\n command = clear\n ).pack()\n \n # backspace\n bck = tk.Frame(\n master = calcarea\n )\n bck.grid(row = 0, column = 4)\n bckb = tk.Button(\n master = bck,\n text = \"\\N{RIGHTWARDS BLACK ARROW}\",\n width = 5,\n height = 2,\n command = backspace\n ).pack()\n \n # This is what kicks the whole thing off, lets it wait for commands.\n calc.mainloop()", "def create(cls, **dictionary):\n if cls.__name__ == \"Square\":\n dummy = cls(1)\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n dummy.update(**dictionary)\n return dummy", "def main():\r\n # Create an instance of the MyCallCostCalc class.\r\n my_callcost = MyCallCostCalc()", "def New(*args, **kargs):\n obj = itkSingleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def calculate(numbers, operator):\n \n if operator == 'add':\n return add(prepare_numbers(numbers))\n elif operator == 'subtract':\n return subtract(prepare_numbers(numbers))\n elif operator == 'multiply':\n return multiply(prepare_numbers(numbers))\n elif operator == 'divide':\n return divide(prepare_numbers(numbers))\n elif operator == 'remainder':\n return remainder(prepare_numbers(numbers))\n elif operator == 'power':\n return power(prepare_numbers(numbers))", "def 
New(*args, **kargs):\n obj = itkCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def get_calculator(settings_path, logging = True):\n settings = SettingsReader(settings_path)\n if settings.get(\"energy_calculator\", \"code\") == \"psi4\":\n return Psi4Calculator(settings_path, logging)\n elif settings.get(\"energy_calculator\", \"code\") == \"qchem\":\n return QchemCalculator(settings_path, logging)\n else:\n raise NoSuchLibraryError(settings.get(\"energy_calculator\", \"code\"))", "def __init__(self):\n self.currentTotal = None\n self.previousOperator = None\n self.previousOperand = None", "def __init__(self, op, value):\n self.op = op\n self.value = value", "def operation_class(self):\n clazzname = self.name.title() + \"Op\"\n if clazzname == \"NopOp\":\n clazz = BaseOp\n else:\n clazz = globals()[clazzname]\n return clazz", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n elif cls.__name__ == \"Square\":\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy", "def test_get_operator_class(self):\n Node = collections.namedtuple(\"Node\", \"op_type\")\n\n op_types = [\"Sum\", \"AveragePool\", \"Mean\"]\n for op_type in op_types:\n node = Node(op_type)\n operator = onnx_converter._get_operator_class(node.op_type, {})\n self.assertTrue(\n issubclass(operator, crypten.nn.Module),\n f\"{op_type} operator class {operator} is not a CrypTen module.\",\n )\n # check conv\n kernel_shapes = [[1], [3, 3]]\n node = Node(\"Conv\")\n for kernel_shape in kernel_shapes:\n attributes = {\"kernel_shape\": kernel_shape}\n operator = onnx_converter._get_operator_class(node.op_type, attributes)\n\n # check invalid op_types\n invalid_types = [(\"Convolution\", {\"kernel_shape\": [3, 3, 3]}), (\"Banana\", {})]\n for invalid_type, attr in invalid_types:\n with self.assertRaises(ValueError):\n node = Node(invalid_type)\n operator = onnx_converter._get_operator_class(node.op_type, attr)", "def __init__(self, opToken, leftOper, rightOper):\n self.operator = opToken\n self.leftOperand = leftOper\n self.rightOperand = rightOper", "def __init__(self, operations = []):\n self.operations = operations", "def infectious_math(cls):\n for prp in [\n \"__add__\", \"__radd__\",\n \"__sub__\", \"__rsub__\",\n \"__mul__\", \"__rmul__\",\n \"__matmul__\", \"__rmatmul__\",\n \"__truediv__\", \"__rtruediv__\",\n \"__floordiv__\", \"__rfloordiv__\",\n \"__mod__\", \"__rmod__\",\n \"__divmod__\", \"__rdivmod__\",\n \"__pow__\", \"__rpow__\",\n \"__lshift__\", \"__rlshift__\",\n \"__rshift__\", \"__rrshift__\",\n \"__and__\", \"__rand__\",\n \"__xor__\", \"__rxor__\",\n \"__or__\", \"__ror__\",\n\n \"__neg__\", \"__rneg__\",\n \"__pos__\", \"__rpos__\",\n \"__abs__\", \"__rabs__\",\n \"__invert__\", \"__rinvert__\",\n ]:\n if hasattr(cls, prp):\n def_impl = getattr(cls, prp)\n def augment(method):\n def augmented(self, *args, **kwargs):\n nonlocal method\n result = method.__get__(self).__call__(*args, **kwargs)\n return cls(result)\n return augmented\n replacement = augment(def_impl)\n replacement.__name__ = prp\n setattr(cls, prp, replacement)\n\n return cls", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n r_dummy = cls(2, 4)\n r_dummy.update(**dictionary)\n return (r_dummy)\n\n if cls.__name__ == \"Square\":\n s_dummy = cls(3)\n s_dummy.update(**dictionary)\n return (s_dummy)", "def create(cls, **dictionary):\n if cls.__name__ 
== 'Rectangle':\n dummy = cls(1, 1)\n elif cls.__name__ == 'Square':\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy", "def main():\n pycalcApp = QApplication(sys.argv)\n pycalcView = PyCalcUi()\n pycalcView.show()\n model = evaluateExpression\n PyCalcController(model=model, view=pycalcView)\n sys.exit(pycalcApp.exec())", "def build(cls, real, imag):\n return cls(f\"{real} {imag}\")", "def New(*args, **kargs):\n obj = itkMultipleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkMultipleValuedCostFunction.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create(cls, **dictionary):\n if cls.__name__ == 'Rectangle':\n dummy = cls(1, 1)\n if cls.__name__ == 'Square':\n dummy = cls(1)\n dummy.update(**dictionary)\n return dummy", "def main():\n app = QApplication(sys.argv)\n calc = Calculator()\n calc.show()\n calc.setFixedSize(calc.size())\n sys.exit(app.exec_())", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugmentation.transformations as cca_trans\n self.operation = eval(self._get_op_str())\n self.args = {'operation': self.operation.__name__, 'constargs': constargs, 'randomargs': randomargs}", "def createWidgets(self):\n num_positions = [ (3,0), (2,0), (2,1), (2,2), (1,0), \\\n (1,1), (1,2), (0,0), (0,1), (0,2) ] \n\n op_info = { '+': (3, 3), '-': (2, 3), '*': (1, 3), '/': (0, 3) }\n self.operators = {}\n\n #Creates the 10 number buttons.\n for i in range(10):\n button = Button(self, text=str(i), height=2, width=5)\n button['command'] = lambda i=i: self.handle_num_general(str(i))\n r, c = num_positions[i][0], num_positions[i][1]\n button.grid(row=r, column=c)\n\n #Creates the 4 operator buttons.\n for op,position in op_info.iteritems():\n button = Button(self, text=str(op), height=2, width=5)\n button['command'] = lambda op=op: self.handle_op_general(op)\n button.grid(row=position[0], column=position[1])\n self.operators[op] = button\n\n self.equals = Button(self, text='=', height=2, width=5)\n self.equals['command'] = self.perform_op\n self.equals.grid(row=3, column=2)\n\n self.clear = Button(self, text='C', height=2, width=5)\n self.clear['command'] = lambda: self.reset('0') \n self.clear.grid(row=3, column=1)\n\n self.change_equals_state(DISABLED)\n self.change_ops_state(DISABLED)", "def calculator(): \n\n #asks for user's input \n user_input = raw_input(\"Type in the math expression and two numbers like + 1 2 : \")\n #splits the user's input into a list\n math_op = user_input.split(\" \")\n\n #pulls the appropriate function based on the user's input\n if math_op[0] == '+':\n print add(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '-':\n print subtract(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '*':\n print multiply(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == '/':\n print divide(int(math_op[1]), int(math_op[2]))\n \n elif math_op[0] == \"square\":\n print square(int(math_op[1]))\n\n elif math_op[0] == 'cube':\n print cube(int(math_op[1]))\n\n elif math_op[0] == 'pow':\n print power(int(math_op[1]), int(math_op[2]))\n\n elif math_op[0] == 'mod':\n print 
mod(int(math_op[1]), int(math_op[2]))\n\n else:\n print \"That is not a valid input. Please try any of the following operator: + - * / square cube pow mod.\"", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")", "def __init__(self, name, operator, values):\n self.name = name\n self.operator = operator\n self.values = values", "def basic_calculator():\r\n\r\n num1 = input(\"Enter first number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num1' to float\r\n try:\r\n num1 = float(num1)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n num2 = input(\"Enter second number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num2' to float\r\n try:\r\n num2 = float(num2)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n # Asking user for the operation\r\n print(\"Select the operation:\")\r\n print(\"Type:\")\r\n print(\"1 for Addition\\n2 for Subtraction\\n3 for Multiplication\\n4 for Division\\n5 for Integer Division\\n6 for Power\")\r\n choice = input(\"Enter your choice: \")\r\n\r\n result = 0.0\r\n\r\n # Performing the operation and providing the result\r\n if choice == '1':\r\n result = num1 + num2\r\n elif choice == '2':\r\n result = num1 - num2\r\n elif choice == '3':\r\n result = num1 * num2\r\n elif choice == '4':\r\n result = num1 / num2\r\n elif choice == '5':\r\n result = num1 // num2\r\n elif choice == '6':\r\n result = num1 ** num2\r\n else:\r\n print(\"Wrong Input! Try Again.\")\r\n exit()\r\n\r\n print(f'\\nThe result is: {result}')", "def New():\n Self = $classname()\n Self._initialize_()\n Self._update_()\n return Self", "def from_operator(operation=debug):\r\n\r\n def C(*things):\r\n return Container(freezed(operation), list(things), [], [], [], [])\r\n return C", "def __new__(cls, ctx):\n return cls.__run(cls, ctx)", "def make_instance(cls):\r\n def get_value(name):\r\n if name in attributes:\r\n return attributes[name]\r\n else:\r\n value = cls['get'](name)\r\n return bind_method(value, instance)\r\n\r\n def set_value(name, value):\r\n attributes[name] = value\r\n\r\n attributes = {}\r\n instance = {'get': get_value, 'set': set_value}\r\n return instance", "def parse_calc_cmd(self, line):\n self.E_str = \"parse_calc_cmd\"\n # Clean up the line\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n _, calc_type, _, var_name, _, new_var_name = words\n\n # Get the variable to calculate the property with\n Var = getattr(self, var_name)\n\n # Check the required metadata has been set\n required_metadata = f_dicts.calc_fncs[calc_type].required_metadata\n Var = getattr(self, var_name)\n for attr in required_metadata:\n if attr not in Var.metadata and attr not in f_dicts.calc_fncs[calc_type]._defaults:\n err_msg = f\"'{attr}' required for calculation of '{calc_type}'\"\n err_msg += \"\\n\\nPlease set it with the following syntax:\\n\\t\"\n err_msg += f\"{var_name}['{attr}'] = <value>\"\n err_msg += f\" or by using a set command.\"\n self.print_error(err_msg)\n\n\n Calc_Obj = f_dicts.calc_fncs[calc_type](Var)\n\n Calc_Obj.calc()\n\n # Create a new variable type\n New_Var = inp_types.Variable(Calc_Obj.name, Calc_Obj, Calc_Obj.metadata)\n setattr(self, new_var_name, New_Var)\n if new_var_name not in self.variables:\n self.variables.append(new_var_name)", "def new_calc_target(self, day, block, which):\n calc = Calculated(\n target_date=day.day,\n 
target_block=which,\n name=block.name,\n color=block.color,\n note=block.note\n )\n for service in block.services:\n calc.services.append(CalculatedService(\n name=service.name,\n start_time=service.start_time\n ))\n return calc", "def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})", "def __new__(cls, value_str):\n\n value_str = value_str.strip()\n if value_str[0] == '[' and value_str[-1] == ']':\n value = literal_eval(value_str)\n else:\n value_str = value_str.split()\n assert len(value_str) >= 1\n\n try:\n value = float(value_str[0])\n except ValueError:\n value = value_str[0]\n\n #False positive, pylint does not see attributes\n #defined in __new__\n #pylint: disable=attribute-defined-outside-init\n if isinstance(value, float):\n if len(value_str) != 1:\n if len(value_str) == 4:\n unit = Unit(value_str[3])\n else:\n unit = Unit('')\n\n if value_str[1] == '+/-':\n plus_error = float(value_str[2])\n minus_error = plus_error\n else:\n plus_error = float(value_str[1])\n minus_error = abs(float(value_str[2]))\n result = super().__new__(cls, value, unit)\n result.plus_error = plus_error\n result.minus_error = minus_error\n return result", "def setUp(self):\n self.adder = Adder()\n self.subtracter = Subtracter()\n self.multiplier = Multiplier()\n self.divider = Divider()\n\n self.calculator = Calculator(self.adder, self.subtracter,\n self.multiplier, self.divider)", "def createMath(self, *args):\n return _libsbml.ASTBasePlugin_createMath(self, *args)", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUS.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, operand_string):\n\n # String to hold the operand literal\n self.op_string = operand_string\n\n # Integer value of the operand\n self.op_value = int(operand_string)", "def from_string_expr(cls, expr):\n if \"*\" in expr:\n ch = \"*\"\n op = \"cross\"\n elif \"+\" in expr:\n ch = \"+\"\n op = \"blend\"\n elif \"/\" in expr:\n ch = \"/\"\n op = \"nest\"\n factors = [cls(s.strip()) for s in expr.split(ch)]\n return cls(op=op, factors=factors)", "def __new__(mcs, name, parent, attr):\n def add_numeric_op(attr_name, op):\n \"\"\"Create an attribute named attr_name that calls\n _numeric_op(self, other, op).\"\"\"\n def closure(self, other):\n return VTKCompositeDataArray._numeric_op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure\n\n def add_reverse_numeric_op(attr_name, op):\n \"\"\"Create an attribute named attr_name that calls\n _reverse_numeric_op(self, other, op).\"\"\"\n def closure(self, other):\n return VTKCompositeDataArray._reverse_numeric_op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure\n\n def add_default_reverse_numeric_op(op_name):\n \"\"\"Adds '__r[op_name]__' attribute that uses operator.[op_name]\"\"\"\n add_reverse_numeric_op(\"__r%s__\"%op_name, getattr(operator, op_name))\n\n def add_default_numeric_op(op_name):\n \"\"\"Adds '__[op_name]__' attribute that uses operator.[op_name]\"\"\"\n add_numeric_op(\"__%s__\"%op_name, getattr(operator, op_name))\n\n def add_default_numeric_ops(op_name):\n \"\"\"Call both 
add_default_numeric_op and add_default_reverse_numeric_op.\"\"\"\n add_default_numeric_op(op_name)\n add_default_reverse_numeric_op(op_name)\n\n add_default_numeric_ops(\"add\")\n add_default_numeric_ops(\"sub\")\n add_default_numeric_ops(\"mul\")\n add_default_numeric_ops(\"truediv\")\n add_default_numeric_ops(\"floordiv\")\n add_default_numeric_ops(\"mod\")\n add_default_numeric_ops(\"pow\")\n add_default_numeric_ops(\"lshift\")\n add_default_numeric_ops(\"rshift\")\n add_numeric_op(\"__and__\", operator.and_)\n add_reverse_numeric_op(\"__rand__\", operator.and_)\n add_default_numeric_ops(\"xor\")\n add_numeric_op(\"__or__\", operator.or_)\n add_reverse_numeric_op(\"__ror__\", operator.or_)\n\n add_default_numeric_op(\"lt\")\n add_default_numeric_op(\"le\")\n add_default_numeric_op(\"eq\")\n add_default_numeric_op(\"ne\")\n add_default_numeric_op(\"ge\")\n add_default_numeric_op(\"gt\")\n return type.__new__(mcs, name, parent, attr)", "def calculations(self):\n return Calculation.find(self)", "def create_instance(self,name):\n print \"INFO : new %s\" % name\n return self.get_class(name)()", "def operator(app):\n return car(app)", "def R(cls, *args, **kwargs):\n return cls(*args, model_type='regressor', **kwargs)", "def some_operation(self):\n\n # Call the factory method to create a Product object.\n product = self.factory_method()\n\n # Now, use the product.\n product.operation()", "def new(cls):\n return cls()", "def __init__(self, math, variables=None, first_derivatives=None,\n second_derivatives=None, name=None):\n self.math = math\n if variables is not None:\n self.variables = variables\n else:\n # expr_manip still returns Sets instead of sets; cast the\n # result to set mostly because this annoys me.\n self.variables = set(em.extract_vars(self.math))\n self._first_derivatives = {}\n if first_derivatives:\n self._first_derivatives.update(first_derivatives)\n self._second_derivatives = {}\n if second_derivatives:\n self._second_derivatives.update(second_derivatives)\n self._code = None\n self._first_derivative_code = {}\n self._second_derivative_code = {}\n self.name = name\n if self.name:\n self.tag = name\n elif len(self.math) < 100:\n self.tag = \"'%s'\" % self.math\n else:\n self.tag = \"'%s...'\" % self.math[:100]", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def run_calc(self):\n\n from openquake.calculators import base, getters\n from openquake.baselib import config, performance, zeromq\n if self.vtag >= 11:\n from openquake.baselib import version\n else:\n from openquake.baselib import __version__ as version\n\n with self.calculator._monitor:\n self.calculator._monitor.username = ''\n try:\n # Pre-execute setups\n self.calculator.pre_execute()\n\n #self.calculator.datastore.swmr_on()\n oq = self.calculator.oqparam\n dstore = self.calculator.datastore\n self.calculator.set_param()\n self.calculator.offset = 0\n\n # Source model\n #print('self.__dict__ = ')\n #print(self.calculator.__dict__)\n if oq.hazard_calculation_id: # from ruptures\n dstore.parent = self.calculator.datastore.read(\n oq.hazard_calculation_id)\n elif hasattr(self.calculator, 'csm'): # from sources\n self.calculator_build_events_from_sources()\n #self.calculator.build_events_from_sources()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n elif 'rupture_model' not in oq.inputs:\n logging.warning(\n 'There is no rupture_model, the calculator will just '\n 'import data 
without performing any calculation')\n fake = logictree.FullLogicTree.fake()\n dstore['full_lt'] = fake # needed to expose the outputs\n dstore['weights'] = [1.]\n return {}\n else: # scenario\n self.calculator._read_scenario_ruptures()\n if (oq.ground_motion_fields is False and oq.hazard_curves_from_gmfs is False):\n return {}\n\n # Intensity measure models\n if oq.ground_motion_fields:\n if self.vtag >= 12:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, imts, oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n elif self.vtag == 11:\n imts = oq.get_primary_imtls()\n nrups = len(dstore['ruptures'])\n base.create_gmf_data(dstore, len(imts), oq.get_sec_imts())\n dstore.create_dset('gmf_data/sigma_epsilon',\n getters.sig_eps_dt(oq.imtls))\n dstore.create_dset('gmf_data/time_by_rup',\n getters.time_dt, (nrups,), fillvalue=None)\n else:\n pass\n\n # Prepare inputs for GmfGetter\n nr = len(dstore['ruptures'])\n logging.info('Reading {:_d} ruptures'.format(nr))\n if self.vtag >= 12:\n rgetters = getters.get_rupture_getters(dstore, oq.concurrent_tasks * 1.25,\n srcfilter=self.calculator.srcfilter)\n elif self.vtag == 11:\n rgetters = getters.gen_rupture_getters(dstore, oq.concurrent_tasks)\n else:\n rgetters = getters.gen_rupture_getters(dstore, self.calculator.srcfilter, oq.concurrent_tasks)\n\n \n args = [(rgetter, self.calculator.param) for rgetter in rgetters]\n mon = performance.Monitor()\n mon.version = version\n mon.config = config\n rcvr = 'tcp://%s:%s' % (config.dbserver.listen,\n config.dbserver.receiver_ports)\n skt = zeromq.Socket(rcvr, zeromq.zmq.PULL, 'bind').__enter__()\n mon.backurl = 'tcp://%s:%s' % (config.dbserver.host, skt.port)\n mon = mon.new(\n operation='total ' + self.calculator.core_task.__func__.__name__, measuremem=True)\n mon.weight = getattr(args[0], 'weight', 1.) 
# used in task_info\n mon.task_no = 1 # initialize the task number\n args += (mon,)\n\n self.args = args\n self.mon = mon\n self.dstore = dstore\n\n finally:\n print('FetchOpenQuake: OpenQuake Hazard Calculator defined.')\n # parallel.Starmap.shutdown()", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHDUC.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .nuclear_energy_plant_radiation_module import energy_plant_radiation_class\n return energy_plant_radiation_class(iface)", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def _new_instance(cls: Type[T], mod_dir: str, home_dir: str,\n state_dir: str, mod_reg: Dict[str, \"Definition\"]) -> T:\n assert cls.name\n mod = cls(mod_dir, home_dir, state.load_state(state_dir, cls.name),\n logging.get_logger(cls.name))\n for dep_name in cls.required:\n dep = mod_reg[dep_name]\n setattr(mod, dep_name, _Protector(dep))\n for dep_name in cls.optional:\n opt_dep = mod_reg.get(dep_name)\n if not opt_dep:\n _LOG.info(\n f\"optional dependency {dep_name} of {cls.name} not available\"\n )\n setattr(mod, dep_name, None)\n continue\n setattr(mod, dep_name, _Protector(opt_dep))\n\n return mod", "def simple_calculator(calculation):\n\n\n operations = {'+': lambda x,y: x + y,'-': lambda x,y: x-y,'*': lambda x,y: x * y,'/': lambda x,y: x/y}\n \n def is_numeric(x):\n\n try:\n float(x)\n int(x)\n except:\n return False\n else:\n return True\n \n\n values = calculation.split()\n print(values)\n if is_numeric(values[0]) and is_numeric(values[2]) and values[1] in operations:\n operation = operations[values[1]]\n try:\n return operation(float(values[0]),float(values[2]))\n except ZeroDivisionError:\n raise ValueError(\"Division by zero\")\n\n\n raise ValueError(\"Invalid Operation\")", "def cmd_calculation():", "def disassembler_factory():\n \n ir_id, ir_cls, dis_cls = find_current_arch()\n \n class disassembler(dis_cls, ir_cls): # disassembler (host) class must be left-most.\n def __init__(self, ir_id):\n self.ir_id = ir_id\n dis_cls.__init__(self)\n ir_cls.__init__(self)\n return\n \n dis = disassembler(ir_id)\n \n return dis", "def __new__(mcs, name, parent, attr):\n def add_numeric_op(attr_name):\n \"\"\"Create an attribute named attr_name that calls\n _numeric_op(self, other, op).\"\"\"\n def closure(self, other):\n return VTKArray._numeric_op(self, other, attr_name)\n closure.__name__ = attr_name\n attr[attr_name] = closure\n\n def add_default_numeric_op(op_name):\n \"\"\"Adds '__[op_name]__' attribute that uses operator.[op_name]\"\"\"\n add_numeric_op(\"__%s__\"%op_name)\n\n def add_reverse_numeric_op(attr_name):\n \"\"\"Create an attribute named attr_name that calls\n _reverse_numeric_op(self, other, op).\"\"\"\n def closure(self, other):\n return VTKArray._reverse_numeric_op(self, other, attr_name)\n closure.__name__ = attr_name\n attr[attr_name] = closure\n\n def add_default_reverse_numeric_op(op_name):\n \"\"\"Adds '__r[op_name]__' attribute that uses operator.[op_name]\"\"\"\n add_reverse_numeric_op(\"__r%s__\"%op_name)\n\n def add_default_numeric_ops(op_name):\n \"\"\"Call both add_default_numeric_op and add_default_reverse_numeric_op.\"\"\"\n add_default_numeric_op(op_name)\n add_default_reverse_numeric_op(op_name)\n\n add_default_numeric_ops(\"add\")\n add_default_numeric_ops(\"sub\")\n add_default_numeric_ops(\"mul\")\n 
add_default_numeric_ops(\"truediv\")\n add_default_numeric_ops(\"floordiv\")\n add_default_numeric_ops(\"mod\")\n add_default_numeric_ops(\"pow\")\n add_default_numeric_ops(\"lshift\")\n add_default_numeric_ops(\"rshift\")\n add_numeric_op(\"and\")\n add_default_numeric_ops(\"xor\")\n add_numeric_op(\"or\")\n\n add_default_numeric_op(\"lt\")\n add_default_numeric_op(\"le\")\n add_default_numeric_op(\"eq\")\n add_default_numeric_op(\"ne\")\n add_default_numeric_op(\"ge\")\n add_default_numeric_op(\"gt\")\n return type.__new__(mcs, name, parent, attr)", "def New(*args, **kargs):\n obj = itkIsoDataThresholdCalculatorHFF.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create(cls, **dictionary):\n if cls.__name__ == \"Rectangle\":\n myDummy = cls(1, 1)\n elif cls.__name__ == \"Square\":\n myDummy = cls(1)\n myDummy.update(**dictionary)\n return myDummy", "def createStandardInputObjFromCalcObjs(self, calcObjs):\n\t\treturn self._stdInpFromCalcObjs(self, calcObjs)", "def main():\n CalculatorApp().mainloop()", "def __init__(self, adder, subtracter, multiplier, divider):\n self.adder = adder\n self.subtracter = subtracter\n self.multiplier = multiplier\n self.divider = divider\n\n self.stack = []", "def __init__(self, adder, subtracter, multiplier, divider):\n self.adder = adder\n self.subtracter = subtracter\n self.multiplier = multiplier\n self.divider = divider\n\n self.stack = []", "def __new__(cls, *args, **kwargs):\n if cls.instance is None:\n cls.instance = super(_ZeroAmount, cls).__new__(cls)\n return cls.instance", "def calculator():\n print(art.logo)\n # Changed 'int' to 'float' to do calculation for floating numbers as well\n num1 = float(input(\"Enter the first number : \"))\n end_calculation = False\n\n while not end_calculation:\n list_operators()\n operator = input(\"Pick an operation : \")\n num2 = float(input(\"Enter the next number : \"))\n calculation_fun = operations[operator]\n answer = round(calculation_fun(num1, num2), 2)\n print(f\"{num1} {operator} {num2} = {answer}\")\n\n wish_to_continue = input(\"Type 'Y' to Continue or Type 'N' to Exit : \").lower()\n if wish_to_continue == \"y\":\n num1 = answer\n else:\n # clear()\n end_calculation = True\n # recursive function call to restart the calculation freshly when user doesn't want to continue\n calculator()", "def __init__(self, expression, result, is_singleton=False):\n\n self.expr = expression\n self.result = result\n self.is_singleton = is_singleton", "def __init__(self, calcGrad, calcCost, input):\n\tself.calcGrad = calcGrad\n\tself.calcCost = calcCost\n\tself.input = np.asarray(input, dtype=np.float32)\n\tself.inp_shape = input.shape", "def __new__(mcs, name, parent, attr):\n def _add_op(attr_name, op):\n \"\"\"Create an attribute named attr_name that calls\n _numeric_op(self, other, op).\"\"\"\n def closure(self, other):\n return VTKNoneArray._op(self, other, op)\n closure.__name__ = attr_name\n attr[attr_name] = closure\n\n def _add_default_reverse_op(op_name):\n \"\"\"Adds '__r[op_name]__' attribute that uses operator.[op_name]\"\"\"\n _add_op(\"__r%s__\"%op_name, getattr(operator, op_name))\n\n def _add_default_op(op_name):\n \"\"\"Adds '__[op_name]__' attribute that uses operator.[op_name]\"\"\"\n _add_op(\"__%s__\"%op_name, getattr(operator, op_name))\n\n def _add_default_ops(op_name):\n \"\"\"Call both add_default_numeric_op and add_default_reverse_numeric_op.\"\"\"\n _add_default_op(op_name)\n _add_default_reverse_op(op_name)\n\n _add_default_ops(\"add\")\n 
_add_default_ops(\"sub\")\n _add_default_ops(\"mul\")\n _add_default_ops(\"truediv\")\n _add_default_ops(\"floordiv\")\n _add_default_ops(\"mod\")\n _add_default_ops(\"pow\")\n _add_default_ops(\"lshift\")\n _add_default_ops(\"rshift\")\n _add_op(\"__and__\", operator.and_)\n _add_op(\"__rand__\", operator.and_)\n _add_default_ops(\"xor\")\n _add_op(\"__or__\", operator.or_)\n _add_op(\"__ror__\", operator.or_)\n\n _add_default_op(\"lt\")\n _add_default_op(\"le\")\n _add_default_op(\"eq\")\n _add_default_op(\"ne\")\n _add_default_op(\"ge\")\n _add_default_op(\"gt\")\n return type.__new__(mcs, name, parent, attr)" ]
[ "0.6303939", "0.5606582", "0.5468204", "0.5322988", "0.53078365", "0.529308", "0.5283356", "0.5271009", "0.52677727", "0.5266936", "0.5252997", "0.5238419", "0.5233305", "0.5231769", "0.5197301", "0.51828855", "0.5182773", "0.5162779", "0.51625335", "0.51475465", "0.51475465", "0.5142594", "0.51315063", "0.512747", "0.51260394", "0.5125916", "0.5114183", "0.51043624", "0.51041955", "0.5100643", "0.50856876", "0.5074678", "0.5065174", "0.505762", "0.5033035", "0.5021856", "0.5016422", "0.5001367", "0.49883187", "0.4975265", "0.49676546", "0.49675006", "0.496178", "0.4961175", "0.49468952", "0.49457493", "0.49444577", "0.4943917", "0.4943917", "0.49434912", "0.4930547", "0.49191317", "0.49191317", "0.49040225", "0.4885421", "0.48683232", "0.48602933", "0.484954", "0.4846732", "0.48448187", "0.4842286", "0.48397857", "0.4819495", "0.48171175", "0.4816521", "0.48144796", "0.4807072", "0.48000586", "0.4799887", "0.47929788", "0.47913298", "0.47909614", "0.47846982", "0.478406", "0.4783031", "0.47814113", "0.47791123", "0.4777555", "0.47668615", "0.47636095", "0.4758488", "0.47579035", "0.47576138", "0.47439644", "0.47438866", "0.474275", "0.4741818", "0.4741164", "0.4740029", "0.4732702", "0.47321844", "0.47295067", "0.47228393", "0.4722288", "0.47192174", "0.47192174", "0.47142357", "0.47127345", "0.47114635", "0.47105315", "0.4710372" ]
0.0
-1
This method creates a project in Pivotal Tracker
def create_project(): client = RequestManager() project_name = "".join(choices(string.ascii_letters + string.digits, k=10)) client.set_method("POST") client.set_endpoint("/projects") body = {"name": project_name} client.set_body(json.dumps(body)) response = client.execute_request() STORED_ID['project_id'] = response.json()['id']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_create(project):\n client.project.create(project)", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def test_create_project(self):\n pass", "def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def createProject(self, payLoad):\n\n uri = \"/v1/projects/\" \n response = self.client.post(uri, payLoad)\n return response", "def test_create_project_request(self):\n pass", "def ktrack_project(ktrack_instance):\n project = ktrack_instance.create(\"project\", {\"name\": \"My_Test_Project\"})\n return project", "def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()", "def create_project_info(data):\n\t\n\tproject = ProjectInfo()\n\tproject.name = data['name']\n\tproject.description = data['description']\n\tproject.start_date = data['start_date']\n\tproject.end_date = data['end_date']\n\tproject.save()\n\tprint ('Inserted')\n\treturn True", "def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())", "def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)", "def 
post_project_create(self, resource_dict):\n pass", "def test_projects_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def _create_project(org, project_name):\n project = Project(\n org=org,\n name=project_name\n )\n project.save()\n return project", "def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response", "def test_add_project(self):\n pass", "def create_project(self, pool, project, arg):\n self.verify_pool(pool)\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n svc = self.projects_path % pool\n ret = self.rclient.post(svc, arg)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error creating project: '\n '%(project)s on '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'project': project,\n 'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.ShareBackendException(msg=exception_msg)", "def create_new_project(project_name, token=None):\n session = konfuzio_session(token)\n url = create_new_project_url()\n new_project_data = {\"name\": project_name}\n r = session.post(url=url, json=new_project_data)\n return r", "def create_project(self, **kwargs):\n _url = f\"{self.base_url}/projects\"\n if \"name\" not in kwargs:\n raise ValueError(\"Parameter 'name' is mandatory\")\n return self.http_call(\"post\", _url, json_data=kwargs).json()", "def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_project(client, session, tokens):\n response = client.post(\n \"/projects\",\n json={\n \"name\": \"New Project\",\n \"organizations\": [],\n \"teams\": [],\n \"users\": [],\n },\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 201\n project_id = response.json[\"id\"]\n assert Project.query.filter(Project.id == project_id).count() == 1", "def test_create_project_target_enabled(self):\n 
self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)", "def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()", "def create(self, token: Any):\n params = [token, ]\n method = \"ProjectAPI.Create\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)", "def test_duplicate_tasks_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\"project_name\": \"test_project_2\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)", "def test_projects_id_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def create_project(self, project):\n\n with self._transaction.cursor() as cur:\n if project.project_id is not None:\n id_ = project.project_id\n else:\n cur.execute(\"SELECT MAX(project_id) + 1 \"\n \"FROM barcodes.project\")\n id_ = cur.fetchone()[0]\n\n query = f\"\"\"\n INSERT INTO barcodes.project\n ({PROJECT_FIELDS})\n VALUES (\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s);\"\"\"\n\n cur.execute(query,\n [id_, project.project_name, project.is_microsetta,\n project.bank_samples, project.plating_start_date,\n project.contact_name, project.additional_contact_name,\n project.contact_email, project.deadlines,\n project.num_subjects, project.num_timepoints,\n project.start_date, project.disposition_comments,\n project.collection, project.is_fecal,\n project.is_saliva, project.is_skin, project.is_blood,\n project.is_other, project.do_16s,\n project.do_shallow_shotgun, project.do_shotgun,\n project.do_rt_qpcr, project.do_serology,\n project.do_metatranscriptomics,\n project.do_mass_spec, project.mass_spec_comments,\n project.mass_spec_contact_name,\n project.mass_spec_contact_email,\n project.do_other,\n project.branding_associated_instructions,\n project.branding_status, project.subproject_name,\n project.alias, project.sponsor, project.coordination,\n project.is_active])\n\n # if we made it this far, all 
is well\n return id_", "def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin [email protected]:{0}/{1}.git'.format(bitbucket_user, conta))", "def create_project(self, name):\n project = self._post('/projects', data={'name': name})\n self.create_project_hook(project['id'], self.webhook_url + name)\n return project", "def create_keystone_v3_project(self, **kwargs):\n LOG_OBJ.debug(\"Creating the project.\")\n print self.project_info\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _project_info = {\"project\": {}}\n for argument in [\"name\", \"description\", \"domain_id\",\n \"enabled\", \"disabled\"]:\n try:\n _project_info['project'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_project_info)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Project details : %s \" % output)\n print (\"Project details : %s \" % output)\n return output['project']['id']", "def create_project(cls, title, code, institution, tech_lead, category, funding_source):\n project = Project.objects.create(\n title=title,\n description='Project description',\n legacy_hpcw_id='HPCW-12345',\n legacy_arcca_id='ARCCA-12345',\n code=code,\n institution=institution,\n institution_reference='BW-12345',\n department='School of Chemistry',\n pi='Project Principal Investigator',\n tech_lead=tech_lead,\n category=category,\n funding_source=funding_source,\n start_date=datetime.datetime.now(),\n end_date=datetime.datetime.now() + datetime.timedelta(days=10),\n economic_user=True,\n requirements_software='None',\n requirements_gateways='None',\n requirements_training='None',\n requirements_onboarding='None',\n allocation_rse=True,\n allocation_cputime='1000000',\n allocation_memory='100',\n allocation_storage_home='5000',\n allocation_storage_scartch='1000',\n notes='Project notes',\n )\n return project", "def create_projects(self):\n if self.gl is None or self.config is None:\n print(\"No config/Gitlab found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Project creation.\")\n gl = self.gl\n config = self.config\n for project in config[\"projects\"]:\n # get the import url\n imp_url = config[\"projects\"][project][\"import_url\"]\n\n # Set rights/members/protected master\n if 
config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"all_users\":\n for user in self.users:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"user\":\n for user in self.users:\n if user.username == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for user \\'\" + user.username + \"\\'\")\n pj = user.projects.create({'name': project,\n 'user_id': user.id,\n 'Access_level': gitlab.OWNER_ACCESS,\n 'import_url': imp_url})\n elif config[\"projects\"][project][\"owner_conf\"][\"owner\"] == \"group\":\n for group in self.groups:\n if group.name == config[\"projects\"][project][\"owner_conf\"][\"name\"]:\n print(\"Importing \\'\" + imp_url + \"\\' for group \\'\" + group.name + \"\\'\")\n pj = group.projects.create({'name': project,\n 'namespace_id': group.id,\n 'import_url': imp_url})\n else:\n print(\"Project owner Config is wrong, aborting\")\n exit(1)\n # Delete protected Master Branch\n if config[\"projects\"][project][\"protect_master_branch\"] == \"False\":\n print(\"Removing Project master Branch protection\")\n pj.protectedbranches.delete('master')", "def NewProject (projectname):\n\tif projectname == \"\" or projectname == None:\n\t\tnewprojcode(projectname)\n\telse:\n\t\tnewprojCode_withNamed()", "def _post_project(prj=None):\n template_path = (os.path.join(\n os.path.split(__file__)[0], \"post_project_template.xml\"))\n with open(template_path, 'r') as file:\n template = Template(file.read())\n response_xml = template.render(\n name=f\"Project_TEST_{datetime.now()}\",\n open_date=str(datetime.today().date()),\n res_uri=f\"{LIMS_API.tools.api.host}researchers/1\")\n\n prj_response = LIMS_API.tools.api.post(\n f\"{LIMS_API.tools.api.host}projects\", response_xml)\n\n prj_response_soup = BeautifulSoup(\n prj_response, \"xml\").find(\"prj:project\")\n prj = api_types.Project(\n prj_response_soup.find(\"name\"),\n DEFAULT_RES,\n datetime.today().date(),\n [],\n prj_response_soup[\"uri\"])\n\n return prj", "def make_project(id):\n return {\n \"type\": \"Project\",\n \"metrics\": [],\n \"tags\": [],\n \"id\": id,\n \"description\": \"\",\n \"applicant\": \"\",\n }", "def _create_dummy_project(self,projectname=\"testproject\"):\n # Create three types of users that exist: Root, can do anything, \n # projectadmin, cam do things to a project he or she owns. And logged in\n # user \n \n #created in _create_main_project_and_root.\n root = self.root\n # non-root users are created as if they signed up through the project, \n # to maximize test coverage. \n \n # A user who has created a project\n projectadmin = self._create_random_user(\"projectadmin_\")\n \n testproject = self._create_comicsite_in_admin(projectadmin,projectname)\n create_page_in_admin(testproject,\"testpage1\")\n create_page_in_admin(testproject,\"testpage2\")\n \n # a user who explicitly signed up to testproject\n participant = self._create_random_user(\"participant_\")\n self._register(participant,testproject)\n \n # a user who only signed up but did not register to any project\n registered_user = self._create_random_user(\"comicregistered_\")\n \n #TODO: How to do this gracefully? 
\n return [testproject,root,projectadmin,participant,registered_user]", "def run(opts, args):\n create_new_project()", "def test_create_account_project(self, create):\n row = {'PROJ_NAME1': 'Some Proj', 'PROJ_NO': '121-212',\n 'SECTOR': 'IT'}\n sync.create_account(row, None)\n self.assertTrue(create.called)\n account, row, issue_map = create.call_args[0]\n self.assertEqual(account.name, 'Some Proj')\n self.assertEqual(account.code, '121-212')\n self.assertEqual(account.category, Account.PROJECT)\n self.assertEqual(0, len(Account.objects.filter(pk=account.pk)))", "def project():", "def project():", "def project():", "def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return", "def create_project(self, name, description=None):\n description = description or ''\n data = self._run(\n url_path=\"projects/add\",\n name=name,\n description=description\n )\n return data['result']['project']['id']", "def create_new_project(self,\n customer_name,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget,\n project_actst=None,\n project_actend=None,\n project_cost=None):\n\n customer_info = self.query_customer(cus_name=customer_name)\n\n if customer_info:\n # Search for project manager in the same region as the customer.\n customer_region_id = customer_info[0][1]\n get_employee_query = \"select employee.emp_id, emp_lname, emp_fname from employee, \" \\\n \"empskill, skill, region where employee.emp_id = \" \\\n \"empskill.emp_id and empskill.skill_id = \" \\\n \"skill.skill_id and skill.skill_descrpt = \" \\\n \"'Project Manager' and region.region_id = \" \\\n \"employee.region_id and region.region_id = '{}' \"\n try:\n self.dbCursor.execute(\n get_employee_query.format(customer_region_id))\n employee_info = self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n if len(employee_info) == 0:\n ErrorMessageWindow(\"No suitable project manager found!\")\n else:\n if customer_info and employee_info:\n if len(customer_info) > 1:\n MultiRowScreen(customer_info, \"project\")\n else:\n cus_id = customer_info[0][0]\n emp_id = employee_info[0][0]\n optional_inputs = [project_actst, project_actend,\n project_cost]\n\n query = \"insert into project(cus_id, emp_id, proj_date, \" \\\n \"proj_descrpt, proj_estdatest, proj_estdateend, \" \\\n \"proj_estbudget) values ('{}', '{}', '{}', '{}', \" \\\n \"'{}', '{}', '{}') \".format(cus_id,\n emp_id,\n contract_date,\n project_info,\n project_datest,\n project_dateend,\n project_budget)\n\n yes_options = False\n for item in optional_inputs:\n if item != \"\":\n yes_options = True\n\n if yes_options is False:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n option_names = [\"proj_actdatest\",\n \"proj_actdateend\",\n \"proj_actcost\"]\n options_index = []\n filled_options = []\n\n index = 0\n for item in optional_inputs:\n if item != \"\":\n options_index.append(index)\n filled_options.append(item)\n index += 1\n update_query = \"update project set \"\n\n j = 0\n for i in options_index:\n if j < len(filled_options) - 1:\n update_query += \"{}='{}', \".format(\n option_names[i], filled_options[j]\n )\n else:\n update_query += \"{}='{}' \".format(\n 
option_names[i], filled_options[j]\n )\n j += 1\n\n try:\n try:\n self.dbCursor.execute(query)\n SuccessMessageWindow(\"Insert success!\")\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n\n self.dbCursor.execute(update_query)\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n self.dbConnection.commit()\n else:\n ErrorMessageWindow(\"Customer not found!\")", "def create(self, request):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project()\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n project.lotId = lot\n #projectNote=projectNote\n\n\n try:\n project.save()\n serializer = ProjectSerializer(project, context={'request': request}) #converting data into json\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)", "def pre_project_create(self, resource_dict):\n pass", "def create_project(self, project_name=None, check=True):\n project_name = project_name or next(utils.generate_ids('project'))\n page_projects = self._page_projects()\n page_projects.button_create_project.click()\n\n with page_projects.form_create_project as form:\n form.field_name.value = project_name\n form.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_presence()\n\n return project_name", "def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))", "def test_new_project_existing_project_new_user(self):\n setup_identity_cache()\n\n # create signup#1 - project1 with user 1\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # Create signup#2 - project1 with user 2\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n # approve signup #1\n new_task1 = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task1.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n # Attempt to approve signup #2\n new_task2 = Task.objects.all()[1]\n url = \"/v1/tasks/\" + new_task2.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})", "def test_create_project_root(self):\n self.assertEqual(Project.objects.count(), 2)\n url = 
reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': None,\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def create_project(conn, project):\r\n sql = ''' INSERT INTO rq_table(id, date, co2x, o2xx, temp, humi, type)\r\n VALUES(%s,%s,%s,%s,%s,%s,%s) '''\r\n cur = conn.cursor()\r\n cur.execute(sql, project)\r\n conn.commit()\r\n return cur.lastrowid", "def create_project_if_necessary(ctx, org_name, project_name, ):\n org = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n pprint(cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=org.id))", "def newProject(self):\n dialog = NewProjectDialog()\n if not dialog.name is None and not dialog.path is None:\n self._app.createProject(str(dialog.name), str(dialog.path))", "def test_create_project_from_template(self):\n project_new = self.project_template.take_template()\n\n self.assertTrue(project_new)", "def create_pcpp(account, row, issue_map):\n country_name = row['LOCATION']\n country = Country.objects.filter(name__iexact=country_name).first()\n if not country:\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Country does not exist: %s\", row['PROJ_NO'], row['LOCATION'])\n issue = issue_map.find(row['SECTOR'])\n if not issue and row['SECTOR'] != 'None':\n logging.getLogger('peacecorps.sync_accounting').warning(\n \"%s: Sector does not exist: %s\", row['PROJ_NO'], row['SECTOR'])\n\n if country and (issue or row['SECTOR'] == 'None'):\n set_balances(row, account)\n account.save()\n\n volunteername = row['PCV_NAME']\n if volunteername.startswith(row['STATE']):\n volunteername = volunteername[len(row['STATE']):].strip()\n\n summary = clean_description(row['SUMMARY'])\n sirtrevorobj = {\"data\": [{\"type\": \"text\", \"data\": {\"text\": summary}}]}\n description = json.dumps(sirtrevorobj)\n\n project = Project(\n title=row['PROJ_NAME1'], country=country, account=account,\n volunteername=volunteername, volunteerhomestate=row['STATE'],\n description=description\n )\n if issue:\n project.overflow = issue.account\n project.save()\n project.campaigns.add(issue)\n else:\n project.save()", "def new_project(self, rootdir=None):\n if rootdir is None:\n rootdir = Ui.instance().select_directory(user.home)\n if not os.path.exists(rootdir):\n os.makedirs(rootdir)\n\n print 'Weld.new_project in ', rootdir\n project = Project(rootdir)\n\n project.save()\n self.project = project\n self.current_project_path = rootdir\n Ui.instance().set_resources_draggable(True)\n Ui.instance().show_status('new project created')", "def create_project(self, project_name: str, domain_id: str = None, classification_type: str = None) -> Project:\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n trainer = self.get_trainer_obj()\n if not domain_id:\n domain_id = self.obj_detection_domain_id if domain_id is None else domain_id\n logger.info(\"Creating object detection project.\")\n try:\n project = trainer.create_project(name=project_name, domain_id=domain_id, classification_type=classification_type)\n return project\n except CustomVisionErrorException:\n raise SettingCustomVisionCannotCreateProject", "def save_project(uid, song_notes, author_name, creation_date, project_name):", 
"def testSessionCreate(self):\n success = False\n project = None\n\n try:\n project = self.session.create_project()\n\n success = True\n except Exception:\n pass\n\n self.failUnless(success)\n self.failIf(project is None)", "def create_project(data):\n try:\n CheckConnection()\n with Connection.cursor() as cursor:\n name = str(data[\"name\"])\n end_date = str(data[\"enddate\"])\n team_size = str(data[\"teamsize\"])\n budget = str(data[\"budget\"])\n tools = str(data[\"tools\"])\n priority = str(data[\"priority\"])\n is_assignment_complete = str(0)\n\n query = \"INSERT INTO Project (ProjectName,ProjectEndDate,ProjectTeamSize,Budget,Tools,Priority,IsAssignmentComplete) VALUES (%s,%s,%s,%s,%s,%s,%s);\"\n cursor.execute(\n query,\n (\n name,\n end_date,\n team_size,\n budget,\n tools,\n priority,\n is_assignment_complete,\n ),\n )\n Connection.commit()\n return True\n\n except conn.Error as error:\n print(\"Failed to update record to database rollback: {}\".format(error))\n Connection.rollback()\n return False", "async def create(self, ctx, name: str,\n owner: discord.Member = None) -> discord.Message:\n if ctx.projects.find_project(name):\n project = ctx.projects.find_project(name)\n if ctx.guild.get_Channel(int(project.get(\"channel\"))):\n return await ctx.send(\"A project with that name exists.\")\n else:\n await ctx.send(\"A project with this name exists but, a related\"\n \" project channel was not found. \"\n \"I will be overwriting the previous project.\")\n ctx.projects.delete_project(name)\n\n owner = owner if owner else ctx.author\n if not ctx.bot.db(\"guilds\").find(str(ctx.guild.id)):\n ctx.bot.db(\"guilds\").insert(str(ctx.guild.id), ctx.bot.empty_guild)\n\n # await ctx.send(\"Creating project channel...\")\n if not ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\"):\n overwrites = {\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False\n ),\n ctx.me: discord.PermissionOverwrite(\n read_messages=True,\n send_messages=True,\n manage_channels=True\n )\n }\n category = await ctx.guild.create_category(\"Flux Projects\",\n overwrites=overwrites)\n ctx.bot.db(\"guilds\").update(str(ctx.guild.id), {\n \"project_category\": str(category.id)})\n\n else:\n category = ctx.guild.get_channel(\n int(ctx.bot.db(\"guilds\").find(\n str(ctx.guild.id)).get(\"project_category\")))\n\n overwrites = {owner: discord.PermissionOverwrite(read_messages=True,\n send_messages=False,\n add_reactions=True),\n ctx.me: discord.PermissionOverwrite(read_messages=True,\n send_messages=True),\n ctx.guild.default_role: discord.PermissionOverwrite(\n read_messages=False)}\n\n channel = await ctx.guild.create_text_channel(f\"{name}-project\",\n category=category,\n overwrites=overwrites)\n await channel.send(f\"Project Owner: {owner}\")\n message = await channel.send(self.empty_progress_bar)\n await message.pin()\n res = ctx.projects.create_project(\n owner.id, owner.id, name, channel.id, message.id)\n if not res:\n return await ctx.send(\"An error has occurred. 
Use `.contact`\"\n \" with error: `ERR_PROJECT_STILL_EXISTS`\")\n return await ctx.send(\"Project created!\")", "def test_create_project_target_remote(self):\n # Create source site\n source_site = self.make_site(\n name=REMOTE_SITE_NAME,\n url=REMOTE_SITE_URL,\n mode=SITE_MODE_SOURCE,\n description=REMOTE_SITE_DESC,\n secret=REMOTE_SITE_SECRET,\n )\n # Make category remote\n self.make_remote_project(\n project_uuid=self.category.sodar_uuid,\n project=self.category,\n site=source_site,\n level=SODAR_CONSTANTS['REMOTE_LEVEL_READ_ROLES'],\n )\n self.assertEqual(Project.objects.count(), 2)\n\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def __init_project(self, presenter):\n # Get the project id correponding to the input presenter\n # (if it does not exists, create a new one)\n if presenter == None:\n raise Exception(\"Cannot publish tasks without specifying a presenter.\"\n \"Please use set_presenter() to specify a presenter.\")\n pbclient = self.cc.pbclient\n p = None\n #try:\n if len(pbclient.find_project(short_name = presenter.short_name)) > 0:\n # the presenter has been created\n p= pbclient.find_project(short_name = presenter.short_name)[0]\n elif len(pbclient.find_project(name = presenter.name)) > 0:\n # the presenter has been created\n p= pbclient.find_project(name = presenter.name)[0]\n else:\n # create a new project with the presente\n p = pbclient.create_project(presenter.name, presenter.short_name, presenter.description)\n\n try:\n self.project_id = p.id\n self.project_short_name = presenter.short_name\n self.project_name = presenter.name\n p= pbclient.find_project(short_name = presenter.short_name)[0]\n p.info['task_presenter'] = Template(presenter.template).safe_substitute(short_name = presenter.short_name, question = presenter.question)\n p.long_description = presenter.description\n p.name = presenter.name\n p.short_name = presenter.short_name\n pbclient.update_project(p)\n except:\n if type(p) is dict and \"exception_msg\" in p.keys():\n raise Exception(\"%s\" %(p[\"exception_msg\"]))\n else:\n print p\n raise", "def test_save(self):\n p = self.create_project(save=False,\n basecamp_url='https://foo.basecamphq.com/projects/1701/log/',\n basecamp_id=None,\n name='')\n p.save()\n self.assertEqual(1701, p.basecamp_id)\n self.assertEqual(\"Kobol's Last Gleaming\", p.name)", "def test_create_project_unknown_user(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': INVALID_UUID,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(Project.objects.count(), 2)", "def create_project(self, conn, name, description=\"\"):\n group = conn.group.allocate(name, description)\n # returns Project object\n return group", "def create(index):\n # Get the project root\n project_root = get_project_root()\n package_name = 
os.path.basename(project_root)\n logging.info(\"Creating package for current project: \" + package_name)\n Packager(package_name, project_root).create(index)", "def create(self, project_name: str):\n \n #check naming-convention\n Utility.matchNamingConvention(project_name)\n\n projects_folder = self.config.projects_folder\n\n #check if folder already existsts\n Utility.checkNotOccupied(project_name, projects_folder)\n\n target_path = projects_folder+project_name\n\n project_godot_file_path = target_path+'/project.godot'\n\n os.mkdir(target_path)\n os.makedirs(target_path+'/bin/plugins', exist_ok=True)\n os.mknod(project_godot_file_path)\n\n project_godot_file = open(project_godot_file_path, mode='w')\n project_godot_file.write('[application]\\n\\nconfig/name=\"'+project_name+'\"\\n')", "def test_new_project_existing_user(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n # unauthenticated sign up as existing user\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": user.email}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n # approve the sign-up as admin\n headers = {\n \"project_name\": \"admin_project\",\n \"project_id\": \"admin_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"admin\",\n \"user_id\": \"admin_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json(), {\"notes\": [\"Task completed successfully.\"]})", "def create_project_in_gme(self, active_node, projectName, contract_term, series):\n core = self.core\n projectMeta = self.META[\"Project\"]\n projectNode = core.create_child(active_node, projectMeta)\n core.set_attribute(projectNode, \"name\", projectName)\n core.set_attribute(projectNode, \"date\", date.today().strftime(\"%d/%m/%Y\"))\n core.set_attribute(projectNode, \"contract term\", int(contract_term))\n core.set_attribute(projectNode, \"series\", series)\n return projectNode", "def test_fields_on_new_project(new_project) -> None:\n\n # assert isinstance(new_project.id, int) # not created yet, hasnt been committed\n assert isinstance(new_project.token, uuid.UUID)\n assert new_project.title == \"Lord of the Rings\"\n assert new_project.email == \"J. R. R. 
Tolkien\"\n assert new_project.phone == \"5558675309\"\n assert new_project.verification is None", "def create_project(name=None, defaultJobTimeoutMinutes=None):\n pass", "def test_projects_put(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PUT',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def create_new_project():\n readline.parse_and_bind('tab: complete')\n\n print \\\n\"\"\"\n xbmcswift2 - A micro-framework for creating XBMC plugins.\n [email protected]\n --\n\"\"\"\n print 'I\\'m going to ask you a few questions to get this project' \\\n ' started.'\n\n # noinspection PyDictCreation\n opts = {}\n\n # Plugin Name\n opts['plugin_name'] = get_valid_value(\n 'What is your plugin name?',\n validate_nonblank\n )\n\n # Plugin ID\n opts['plugin_id'] = get_valid_value(\n 'Enter your plugin id.',\n validate_pluginid,\n 'plugin.video.%s' % (opts['plugin_name'].lower().replace(' ', ''))\n )\n\n # Parent Directory\n opts['parent_dir'] = get_valid_value(\n 'Enter parent folder (where to create project)',\n validate_isfolder,\n getcwd()\n )\n opts['plugin_dir'] = os.path.join(opts['parent_dir'], opts['plugin_id'])\n assert not os.path.isdir(opts['plugin_dir']), \\\n 'A folder named %s already exists in %s.' % (opts['plugin_id'],\n opts['parent_dir'])\n\n # Provider\n opts['provider_name'] = get_valid_value(\n 'Enter provider name',\n validate_nonblank,\n )\n\n # Create the project folder by copying over skel\n copytree(SKEL, opts['plugin_dir'], ignore=ignore_patterns('*.pyc'))\n\n # Walk through all the new files and fill in with out options\n for root, dirs, files in os.walk(opts['plugin_dir']):\n for filename in files:\n update_file(os.path.join(root, filename), opts)\n\n print 'Projects successfully created in %s.' 
% opts['plugin_dir']\n print 'Done.'", "def init_project(self,project_name,project_dir):\n projectkey = id_generator(10)\n if \"towercrane\" not in os.listdir(project_dir):\n print(f'Initializing project:\"{project_name}\" with projectkey: \"{projectkey}\" ')\n self.TowercraneConfig = {\"project_name\":project_name,\n \"projectkey\":projectkey,\n \"publicurl\":\"private_project\"\n }\n write_config(project_dir,self.TowercraneConfig)\n project_insert_report = self.db.create_project(project_name,project_dir,projectkey)\n print(project_insert_report)\n \n elif \"towercrane\" in os.listdir(project_dir):\n self.TowercraneConfig = read_config(project_dir)\n print(f'project:\"{self.TowercraneConfig[\"project_name\"]}\" with projectkey: \"{self.TowercraneConfig[\"projectkey\"]}\" Already Exists')", "def createproject(project_name):\n app_clone_script = 'git clone https://github.com/jaarce/falcon-bp.git %s' % project_name\n subprocess.call(app_clone_script.split(' '))", "def test_user_can_create_a_project(self):\n self.assertEqual(project_model.Project.objects.get(user=self.test_user).pk, self.test_project.pk)", "def test_create_project_target_disabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 403, msg=response.content)\n self.assertEqual(Project.objects.count(), 2)", "def create(self, request, *args, **kwargs):\n project = Project.objects.get(id=kwargs[\"projects_pk\"])\n self.check_object_permissions(request, project)\n\n serializer = self.get_serializer(data=request.data)\n if serializer.is_valid():\n serializer.save(permission=\"contributor\", role=\"Contributor\")\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_new_project_invalid_on_submit(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.data, {\"notes\": [\"created token\"]})\n self.assertEqual(len(mail.outbox), 3)\n\n fake_clients.identity_cache[\"projects\"] = {}\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(len(mail.outbox), 3)", "def test_notification_CreateProjectAndUser(self):\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": 
\"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n new_task = Task.objects.all()[0]\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"[email protected]\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n\n url = \"/v1/notifications\"\n response = self.client.get(url, headers=headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json()[\"notifications\"][0][\"task\"], new_task.uuid)", "def create_project(opts):\n if opts['django']:\n structure.create_django_proj(opts)\n if opts['cookiecutter_template']:\n structure.create_cookiecutter(opts)\n proj_struct = structure.make_structure(opts)\n structure.create_structure(proj_struct,\n update=opts['update'] or opts['force'])\n if not opts['update'] and not repo.is_git_repo(opts['project']):\n repo.init_commit_repo(opts['project'], proj_struct)", "def create_new_python_project():\n\t# Create the different variables\n\tfolder_name = str(sys.argv[1])\n\tdir_name = my_project_folder + folder_name\n\tpy_file = dir_name + '/' + folder_name + '.py'\n\treadme_file = dir_name + '/' + 'README.md'\n\ttodo_file = dir_name + '/' + 'TODO.txt'\n\n\t# Create directory if it does not exist yet\n\tif not os.path.exists(dir_name):\n\t\tos.mkdir(dir_name)\n\t\tprint(\"Directory \" , dir_name , \" Created \")\n\n\t\t# Create Python file\n\t\tdata = ''\n\t\twith open(template_py, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(py_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Python file created\")\n\n\t\t# Create README file\n\t\tdata = ''\n\t\twith open(template_readme, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(readme_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Readme file created\")\n\n\t\t# Create Todo file\n\t\twith open(todo_file, 'w') as f:\n\t\t\tprint(\"TODO file created\")\n\n\t\t# Create Github repo\n\t\twith open(\".env\", \"r\") as f:\n\t\t\tdata = f.read()\n\n\t\tindex_1 = data.find('TOKEN=\"') + len('TOKEN=\"')\n\t\ttoken = data[index_1:-1]\n\t\tg = Github(token)\n\t\tuser = g.get_user()\n\t\trepo = user.create_repo(folder_name)\n\t\tprint(\"Succesfully created repository {}\".format(folder_name))\n\n\n\telse: \n\t\tprint(\"Directory \" , dir_name , \" already exists\")", "def create_project(conn, project):\n sql = ''' INSERT INTO projects(name,begin_date,end_date)\n VALUES(?,?,?) 
'''\n cur = conn.cursor()\n cur.execute(sql, project)\n return cur.lastrowid", "def create():", "def create():", "def _create_namespace(self):\n self.ocp.new_project(self.namespace)", "def setup_project(client, project_template, do_auth=True):\n client = deepcopy(client)\n email = \"[email protected]\"\n password = \"test\"\n urls = URLS()\n project_config = project_template()\n\n # we work in empty database, so let's create business user and login\n user = User.objects.create(email=email)\n user.set_password(password) # set password without hash\n\n create_business(user)\n org = Organization.create_organization(created_by=user, title=user.first_name)\n user.active_organization = org\n user.save()\n\n if do_auth:\n\n assert signin(client, email, password).status_code == 302\n # create project\n with requests_mock.Mocker() as m:\n m.register_uri('POST', re.compile(r'ml\\.heartex\\.net/\\d+/validate'), text=json.dumps({'status': 'ok'}))\n m.register_uri('GET', re.compile(r'ml\\.heartex\\.net/\\d+/health'), text=json.dumps({'status': 'UP'}))\n r = client.post(urls.project_create, data=project_config)\n print('Project create with status code:', r.status_code)\n assert r.status_code == 201, f'Create project result should be redirect to the next page'\n\n # get project id and prepare url\n project = Project.objects.filter(title=project_config['title']).first()\n urls.set_project(project.pk)\n print('Project id:', project.id)\n\n client.project = project\n\n client.user = user\n client.urls = urls\n client.project_config = project_config\n client.org = org\n return client", "def add_project(project, network, id):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.add_project(project, network, id)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def CreateProject(projectName='project'):\r\n projectName = input('''The project's name: ''')\r\n if not os.path.exists(projectName):\r\n os.mkdir(projectName)\r\n else:\r\n print('There is a file with the same name.')\r\n\r\n for dir in ['OPT', 'SCF', 'PHO']:\r\n if not os.path.exists(projectName + os.sep + dir):\r\n os.mkdir(projectName + os.sep + dir)", "def project(projectname,targetamount):\n if (validatename(projectname) and validatenum(targetamount)):\n targetamount=float(targetamount)\n con = lite.connect(databasefile)\n with con:\n cur = con.cursor() \n cur.execute(\"SELECT Id FROM projects where name=?\", (projectname,))\n exists = cur.fetchone()\n if exists:\n click.echo(\"Project name already exists!\")\n sys.exit()\n cur.execute(\"INSERT INTO projects (Name, Tamount) VALUES (?, ?)\", (projectname, targetamount))\n click.echo(\"Added %s project with target of $%-.2f\" % (projectname, targetamount))", "def test_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\"path\": \"manager?project=ProjectTest\", \"action\": \"redirect\", \"status\": \"success\"},\n status=200\n )\n\n self.azk.create(self.project, self.description)", "def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)", "async def create_project_flow(\n create_project_flow_request: 
CreateProjectFlow,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /osairis/project_flow/create endpoint\")\n logging.debug(f\"Request: {create_project_flow_request}\")\n if decodeJWT(token=token):\n response = ProjectFlowController().create_project_flow_controller(\n request=create_project_flow_request\n )\n return CreateProjectFlowResponse(**response)\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /osairis/project_flow/create endpoint: {error}\")\n raise error", "def create(self, values):\n TYPE = self.env['anytracker.ticket.type']\n SEQUENCE = self.env['ir.sequence']\n types = TYPE.search([('code', '=', 'node')])\n values.update({\n 'number': SEQUENCE.sudo().next_by_code('anytracker.ticket')})\n if values.get('parent_id'):\n values['project_id'] = self.browse(\n values['parent_id']).project_id.id\n\n # project creation: auto-assign the 'node' type\n if not values.get('parent_id') and types:\n values['type'] = types[0].id\n\n # add myself to the project at creation\n if not values.get('parent_id'):\n p_ids = values.get('participant_ids', [(6, 0, [])])\n if self.env.uid not in p_ids:\n values['participant_ids'] = [\n (6, 0, [self.env.uid] + p_ids[0][2])]\n\n # replace ticket numbers with permalinks\n if 'description' in values:\n values['description'] = add_permalinks(\n self.env.cr.dbname, values['description'])\n\n ticket = super(Ticket, self).create(values)\n\n if not values.get('parent_id'):\n ticket.write({'project_id': ticket.id})\n\n # turn the parent into a node\n if 'parent_id' in values and values['parent_id'] and types:\n ticket.browse(values['parent_id']).write({'type': types[0].id})\n\n # subscribe the followers of the parent,\n # or the participants if this is a project\n # This allows to subscribe or unsubscribe to ticket subtrees\n if ticket.project_id.participant_ids:\n if ticket.parent_id:\n ticket.message_subscribe(\n ticket.parent_id.message_follower_ids.ids)\n else:\n ticket.message_subscribe_users(\n ticket.participant_ids.ids)\n\n return ticket", "def save(self, project_id=None):\r\n if project_id is not None:\r\n project = Project.objects.get(pk=int(project_id))\r\n else:\r\n project = Project()\r\n # Fill out the data of the given project and prepare it\r\n # for saving into database.\r\n project.Name = self.cleaned_data['name']\r\n project.ProjectClient = self.cleaned_data['project_client']\r\n project.Start = self.cleaned_data['start']\r\n project.End = self.cleaned_data['end']\r\n project.ProjectManager = self.cleaned_data['project_manager']\r\n project.QualityAssurance = self.cleaned_data['quality_assurance']\r\n project.Price = self.cleaned_data['price']\r\n project.Segment = self.cleaned_data['segment']\r\n project.Type = self.cleaned_data['type']\r\n project.save()\r\n # If the item was just created, set up workflow for it\r\n if project_id is None:\r\n workflow = Workflow.objects.get(name='Project')\r\n utils.set_workflow(project, workflow)\r\n state = utils.get_state(project)\r\n project.Status = state\r\n project.save()\r\n return project" ]
[ "0.7641362", "0.74125963", "0.74125963", "0.74125963", "0.73800826", "0.7369805", "0.72611123", "0.7240642", "0.72367054", "0.7189181", "0.71309054", "0.70826024", "0.70679694", "0.7061639", "0.6987262", "0.698142", "0.696992", "0.6924102", "0.69188553", "0.6910896", "0.6910389", "0.6896292", "0.6886656", "0.6839304", "0.6798925", "0.67972517", "0.67882097", "0.67828375", "0.67761225", "0.67730415", "0.676983", "0.6765078", "0.67542094", "0.67119503", "0.67041045", "0.6658832", "0.66244453", "0.66219175", "0.66217315", "0.66213596", "0.66190153", "0.6615325", "0.65957326", "0.65957326", "0.65957326", "0.6580955", "0.6541764", "0.6540238", "0.6536616", "0.6535735", "0.651638", "0.6504959", "0.6502909", "0.64886916", "0.64804417", "0.64592373", "0.64453495", "0.6444375", "0.64364594", "0.6422805", "0.6422294", "0.64128405", "0.64121294", "0.6404425", "0.6391617", "0.63791203", "0.6376413", "0.636682", "0.6366483", "0.6353054", "0.63263005", "0.6308415", "0.63074535", "0.63015205", "0.6300897", "0.62934697", "0.62706727", "0.62626433", "0.62578815", "0.62483877", "0.6242479", "0.6237388", "0.622969", "0.62106353", "0.62096643", "0.62044406", "0.6191682", "0.61899376", "0.6186025", "0.6186025", "0.61714125", "0.617067", "0.61611396", "0.61572653", "0.61543864", "0.615152", "0.6142628", "0.6130133", "0.6122881", "0.61210936" ]
0.75777197
1
Static method for deleting a project.
def delete_project(project_id):
    client = RequestManager()
    client.set_method("DELETE")
    client.set_endpoint("/projects/{0}".format(project_id))
    client.execute_request()
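For context, a minimal runnable sketch of how this positive snippet could be exercised is shown below. The RequestManager helper is not defined anywhere in this row, so the stub here (its base URL, attribute names, and use of urllib) is an assumption for illustration only, not the dataset's actual implementation.

import urllib.request

class RequestManager:
    # Hypothetical stand-in for the undefined RequestManager used above;
    # it stores a method/endpoint pair and issues the HTTP call when asked.
    BASE_URL = "http://localhost:8000"  # assumed base URL

    def __init__(self):
        self._method = None
        self._endpoint = None

    def set_method(self, method):
        # HTTP verb, e.g. "DELETE"
        self._method = method

    def set_endpoint(self, endpoint):
        # Path relative to BASE_URL, e.g. "/projects/42"
        self._endpoint = endpoint

    def execute_request(self):
        # Build and send the request; returns the HTTP status code.
        req = urllib.request.Request(self.BASE_URL + self._endpoint,
                                     method=self._method)
        with urllib.request.urlopen(req) as resp:
            return resp.status

With such a stub in place, calling delete_project(42) would issue DELETE /projects/42 against the assumed base URL.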
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def delete_project(arn=None):\n pass", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()", "def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response", "def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True", "def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))", "def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project 
Deleted Successfully.\",\n }", "def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))", "def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()", "def destroy(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n project.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Project.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def delete_project(self, project_id):\n _url = f\"{self.base_url}/projects/{project_id}\"\n self.http_call(\"delete\", _url)\n return", "def delete_project(project_id):\n \n project = mongo.db.projects\n project.delete_one({'_id': ObjectId(project_id)})\n flash('Your project has been deleted.', 'success')\n return redirect(url_for('projects'))", "def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True", "def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def delete_project(self, project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_delete.click()\n\n page_projects.form_delete_project_confirm.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_absence()", "def 
delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet, id=id)\n\n # Check if the logged in user is allowed to delete this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Eventually delete the project\n project.delete()\n\n return redirect(\"projects\")", "def delete_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/' + str(project_id))\n else:\n if request.method == \"POST\":\n if project:\n project.delete()\n return HttpResponseRedirect('/projects/')\n else:\n return render(request, 'projects/delete_project.html',\n {'project': project})\n return render(request, 'projects/delete_project.html', {'project': project})", "def delete_project(self, name=None, delete_dir=False):\n victim = name or self.current\n if victim not in self:\n raise ValueError(\"{} is not a project\".format(victim))\n\n if len(self) == 1:\n raise ValueError(\"Can't delete only remaining project\")\n\n ProjectDataset.delete().where(ProjectDataset.name == victim).execute()\n\n if delete_dir:\n dir_path = self._base_data_dir / safe_filename(victim)\n assert dir_path.is_dir(), \"Can't find project directory\"\n shutil.rmtree(dir_path)\n\n if name is None or name == self.current:\n if \"default\" in self:\n self.set_current(\"default\")\n else:\n self.set_current(next(iter(self)).name)\n return self.current", "def post_project_delete(self, resource_id, resource_dict):\n pass", "def delete(self, project_id):\n try:\n authenticated_user_id = token_auth.current_user()\n if not ProjectAdminService.is_user_action_permitted_on_project(\n authenticated_user_id, project_id\n ):\n raise ValueError()\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n try:\n ProjectAdminService.delete_project(project_id, authenticated_user_id)\n return {\"Success\": \"Project deleted\"}, 200\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )", "def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)", "def delete_remote_project(profile, project):\n return delete_remote_project_worker.delay(profile_id=profile.id,\n project_id=project.id)", "def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n 
try:\n project.delete()\n self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def delete_keystone_v3_project(self, project_id, domain_id):\n LOG_OBJ.debug(\"Disable the project.\")\n kwargs = {\"project_id\": project_id, \"enabled\": False}\n self.set_keystone_v3_project(**kwargs)\n\n LOG_OBJ.debug(\"Deleting the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(project_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the project\")\n print (\"No response from Server while deleting the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Deleting project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()", "def test_remove_project(self):\n pass", "def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)", "def delete_project(self):\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n task_status = tasks[self.tasks_view.currentRow()].Status\n\n if task_status is 0:\n warning = Warning(\n \"<html><head/><body><p align=\\\"center\\\"><span style=\\\" font-weight:600;\\\">\"\n \"Unable delete Task. 
\"\n \"Make sure the Task is Done\"\n \"</span></p></body></html>\"\n )\n warning.exec_()\n else:\n self.tasks_flow.delete_task(task_id)\n self.write_tasks_table()", "def project_delete_event(self, proj_info):\n\n LOG.debug(\"Processing project_delete_event...\")\n proj_id = proj_info.get('resource_info')\n proj_name = self.get_project_name(proj_id)\n if proj_name:\n try:\n self.dcnm_client.delete_project(proj_name,\n self.cfg.dcnm.\n default_partition_name)\n except dexc.DfaClientRequestFailed:\n # Failed to delete project in DCNM.\n # Save the info and mark it as failure and retry it later.\n LOG.error(_LE(\"Failed to create project %s on DCNM.\"),\n proj_name)\n self.update_project_info_cache(proj_id, name=proj_name,\n opcode='delete',\n result=constants.DELETE_FAIL)\n else:\n self.update_project_info_cache(proj_id, opcode='delete')\n LOG.debug('Deleted project:%s', proj_name)\n self.project_delete_notif(proj_id, proj_name)", "def pre_project_delete(self, resource_id):\n pass", "def delete(self, name, project=None):\n qlist = self._list(project)\n key = self._queue(project, name)\n self._db.delete(key)\n self._db.zremrangebyscore(qlist, -1, 1)", "def delete(self, team_id, project_id):\n try:\n if not ProjectAdminService.is_user_action_permitted_on_project(\n token_auth.current_user, project_id\n ):\n raise ValueError()\n TeamService.delete_team_project(team_id, project_id)\n return {\"Success\": True}, 200\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403", "def delete_namespaced_project(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Task', 
int(guid))\n task = db.get(key)\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if not task == None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"deleteTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': self.request.params['UUID'], 'action': \"deleteTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n task.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_api(self, project_id, api_id, hbase_manager=None):\n\n self._project_service.get_nondeleted_project(project_id)\n\n proj_api_col = \"apis:{0}\".format(api_id)\n project_id = bytes(project_id, self._charset)\n api_id = bytes(api_id, self._charset)\n\n self.get_nondeleted_api(project_id, api_id)\n\n connection = hbase_manager.connection\n api_tbl = connection.table(\"apis\")\n projects_tbl = connection.table(\"projects\")\n\n projects_tbl.delete(project_id, [bytes(proj_api_col, self._charset)])\n api_tbl.put(api_id, {\n b\"attrs:state\": b\"deleted\"\n })", "def delete(self, project_id):\n project_model = ProjectDBModel.query.get(project_id)\n if not project_model:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n try:\n data = request.get_json()\n # cambiarlo cuando se vuelva a tener dos PKs\n deleted = FavoritesProjectDBModel.delete(\n data['user_id'], project_id)\n if deleted:\n users = \\\n FavoritesProjectDBModel.get_favorites_of_project_id(\n project_id)\n response_object = {\n \"project_id\": project_id,\n \"users_id\": users,\n }\n return response_object, 200\n else:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n except KeyError:\n ns.abort(404, status=MISSING_VALUES_ERROR)", "def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False", "async def delete(self, ctx, project_name: str) -> None:\n if not ctx.projects.find_project(project_name):\n channel = discord.utils.get(\n ctx.guild.channels, name=f\"{project_name}-project\")\n\n if channel and channel.category.name == \"Flux Projects\":\n if ctx.author.permissions_in(channel).manage_channels:\n message = await ctx.send(\"That project doesn't appear to\"\n \" exist in my database, but the \"\n \"channel still exists. 
\"\n \"Would you like to delete it?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\",\n check=lambda reaction, user: (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n await channel.delete(reason=\"Project not found.\")\n await ctx.send(\"The channel was deleted sucessfully.\")\n return\n\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the channel.\")\n return\n\n else: # If author doesn't have access to deleting channels.\n await ctx.send(\"That project does not appear to be in my \"\n \"database, but the channel for it still \"\n \"exists. Please have someone with\"\n \" manage channels run this chommand.\"\n )\n return\n else:\n await ctx.send(\"I could not find this project.\")\n return\n\n if str(ctx.author.id) != ctx.projects.find_project(project_name).get(\n \"owner\"):\n await ctx.send(\"Only the project owner \"\n \"can delete this project.\")\n return\n message = await ctx.send(\"This action __cannot__ be undone. \"\n \"Once you do this, everything is gone. \"\n \"Are you sure you want to continue?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\", check=lambda reaction, user:\n (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n channel = ctx.projects.find_project(\n project_name).get(\"channel\")\n channel = discord.utils.get(ctx.guild.channels,\n id=int(channel))\n ctx.projects.delete_project(project_name)\n if channel:\n await channel.delete(reason=\"Project deleted.\")\n await ctx.send(\"The project has been deleted.\")\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the project.\")", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def project_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def remove_project(project_id):\n response_object = {'status': 'success'}\n with database.engine.begin() as connection:\n\n stmt = select([models.projects.c.path]).where(\n models.projects.c.project_id == project_id)\n project = connection.execute(stmt).first()\n\n if project:\n app = flask.current_app\n project_path = os.path.join(\n app.root_path, app.config['DATA_DIRECTORY'], project['path'])\n if 'morphocut' in project_path and app.config['DATA_DIRECTORY'] in project_path:\n print('removing project with id {}'.format(project_id))\n if os.path.exists(project_path):\n helpers.remove_directory(project_path)\n\n stmt = models.projects.delete().where( # pylint: disable=no-value-for-parameter\n models.projects.c.project_id == project_id)\n\n connection.execute(stmt)\n\n return jsonify(response_object)", "def delete_project_by_name(self, project_name):\n with self._transaction.cursor() as cur:\n # delete associations between this project and any barcodes\n cur.execute(\"DELETE FROM barcodes.project_barcode \"\n \"WHERE project_id in (\"\n \"SELECT project_id FROM barcodes.project \"\n \"WHERE project = 
%s)\",\n (project_name,))\n\n # now delete the project itself\n cur.execute(\"DELETE FROM barcodes.project WHERE project = %s\",\n (project_name,))\n return cur.rowcount == 1", "def _delete_dm_project(self, project_uuid: str) -> None:\n _LOGGER.warning('Deleting DM Project %s...', project_uuid)\n\n dm_rv: DmApiRv = DmApi.delete_project(self.__org_owner_dm_token,\n project_id=project_uuid)\n if not dm_rv.success:\n _LOGGER.error('Failed to delete DM Project %s', project_uuid)\n return\n\n _LOGGER.warning('Deleted DM Project %s', project_uuid)", "def delete(project, zone, instance):\n print >>sys.stderr, 'WARNING: duplicated jobs may fail/corrupt results'\n print >>sys.stderr, ('TODO(fejta): See http://stackoverflow.com/'\n 'questions/19645430/changing-jenkins-build-number')\n answer = raw_input('Delete %s [yes/NO]: ')\n if not answer or answer != 'yes':\n print >>sys.stderr, 'aborting'\n sys.exit(1)\n gcloud(\n project,\n 'compute',\n 'instances',\n 'delete',\n '--zone=%s' % zone,\n instance,\n )\n gcloud(\n project,\n 'compute',\n 'disks',\n 'delete',\n '--zone=%s' % zone,\n *get_disks(instance))", "def delete_orphan_project(apps, schema_editor):\n Project = apps.get_model('data_manager', 'Project')\n Project.objects.filter(dataset__isnull=True).delete()\n return", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def delete_project_quotas(self, project_id, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.delete('project-quotas/' + project_id,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp", "def DelProject(projname):\n\tif projname == \"\" or projname == None:\n\t\tpjnm = raw_input(\"\\nNombre del proyecto: \").lower()\n\t\tif pjnm == \"\" or pjnm == None:\n\t\t\tcancel()\n\telse:\n\t\t# Proceso para borrar todo el proyecto\n\t\tpass\n\n\tpa = open(\"author_name.txt\", \"r\")\t#Abre el archivo con el nombre del autor\n\tpa.read()\n\tpc = open(\"project_code.txt\", \"r\")\t#Abre el archivo con el codigo de proyecto\n\tpc.read()\n\n\tuserpa = raw_input(\"Ingrese el nombre del autor: \").lower()\n\tuserpc = raw_input(\"Ingrese el codigo del proyecto: \").lower()\n\n\tif userpa == pa and userpc == pc:\t#Se verifica que userpa(nombre del autor por el usuario) sea igual a pa(nombre original del autor) y lo mismo con el codigo del proyecto\n\t\tprint \"Iniciando el Borrado del Proyecto...\"\n\t\tpcommands.del_project()\n\t\tprint \"El proyecto se ha borrado con exito!\"\n\telse:\n\t\tprint \"El codigo del proyecto o el nombre del autor no es correcto.\"\n\t\tcancel()", "def delete_all_projects():\n client = RequestManager()\n client.set_method(\"GET\")\n client.set_endpoint(\"/projects\")\n response = client.execute_request()\n for project in response.json():\n try:\n ProjectHelper.delete_project(project[\"id\"])\n except TypeError:\n LOGGER.info(project)", "def delete(self, endpoint, params=None):\n params = params or dict()\n return self.request(verb=requests.delete, address=self.project_address + endpoint,\n params=params)", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "def command_delete(self):\n if self.gcp_env.project not in SUPPORTED_PROJECT_CONFIGS.keys():\n _logger.error(f'Project config not supported {self.gcp_env.project}')\n return 1\n\n if not self.args.bucket and not self.args.id:\n _logger.error(\"--bucket and --id required for delete.\")\n return 1\n\n # Get 
notification\n client = storage.Client()\n bucket = client.get_bucket(self.args.bucket)\n target = bucket.get_notification(self.args.id, client)\n\n if self.gcp_env.project != target.topic_project:\n _logger.error(\"Notification project and specified project do not match.\")\n return 1\n\n # Delete the notification\n try:\n target.delete(client=client)\n\n except NotFound:\n _logger.error(f\"Notification ID {self.args.id} not found.\")\n return 1\n\n _logger.info(f\"Notification id {self.args.id} has been deleted.\")\n\n _logger.info(\"Removing notification from config...\")\n self.delete_notification_from_config()\n\n return 0", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def test_projects_id_contacts_delete(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "def delete():", "def delete(self, application_id):", "def _on_del_project(self):\n project = self.ddnCurProject.get()\n# if len(project) > 0:\n if project:\n if '.prj'!= project[-4:]:\n project += '.prj'\n if os.path.exists(self.BibTerm + '/'+ project):\n os.remove(self.BibTerm + '/'+ project)\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n# if len(self.list_projects) > 0:\n if self.list_projects:\n self.ddnCurProject.set(self.list_projects[0])\n else:\n self.ddnCurProject.set('')\n pass", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def delete_language_from_project(self, project_id, language_code):\n self._run(\n url_path=\"languages/delete\",\n id=project_id,\n language=language_code\n )\n return True", "def record_destroy_for_project(project_id):\n session = get_session()\n with session.begin():\n session.query(models.ProjectAccountRecord).\\\n filter_by(project_id=project_id).\\\n update({'deleted': True,\n 'deleted_at': datetime.datetime.utcnow(),\n 'updated_at': datetime.datetime.utcnow()})", "def remove_deleted_project(self, project_id):\n self.db.make_query(\n '''\n DELETE FROM deleted_project WHERE project_id = \"{}\";\n '''.format(project_id)\n )\n\n if self.get_deleted_project(project_id):\n return False\n\n return True", "def delete(self):\n\n headers = self._default_headers()\n\n return self._request(self.name,\n ok_status=None,\n data=None,\n headers=headers,\n method=\"DELETE\")", "def remove_project(self, project_id):\n project_file_path = '{}/{}'.format(self._storage_location, project_id)\n if os.path.exists(project_file_path):\n os.remove(project_file_path)\n else:\n raise ValueError('The project id {} does not exist!'.format(project_id))", "def test_projects_id_comments_delete(self):\n project = Comment()\n response = 
self.client.open('/project-tracker/projects/{id}/comments'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "async def delete_user_byid(*_):\n return web.Response(text=\"PUT project not implemented\", status=501)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "def delete(project_id, integration_id):\n IntegrationService.delete(project_id, integration_id)\n\n return {\"status\": \"deleted\"}, 200", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def delete_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member\n pm = ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n # Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n record = get_object_or_404(models.Record, pk=pk)\n # Delete record\n models.Record.objects.filter(project=get_object_or_404(models.Project, slug=slug), pk=pk).delete()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # Access denied...\n return HttpResponse(\"You don't have the permission to do this\")", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete_bucket_from_project(projectname, bucketname):\n return jsonify(\n admin.delete_bucket_on_project(\n current_app.scoped_session(), projectname, bucketname\n )\n )", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete_project_entities(cls, project_id,\n suppress_exception=False,\n session=None):\n cls.db_repo.delete_project_entities(\n project_id, suppress_exception=suppress_exception, session=session)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'delete', api_path, *args, **kwargs)", "def test_Projects_CanBeCreatedAndDeleted_Successfully(self):\n\n name = \"Test Project \"\n # Test create a new project successfully\n project = self.api.create_project(name, \"Py\")\n self.assertTrue(project['success'])\n self.assertTrue(project['projects'][0]['projectId'] > 0)\n self.assertTrue(project['projects'][0]['name'] == name)\n\n # Delete the project\n deleteProject = self.api.delete_project(project['projects'][0]['projectId'])\n self.assertTrue(deleteProject['success'])\n\n # Make sure the project is really deleted\n projectList = self.api.list_projects()\n self.assertFalse(any(project['projects'][0]['projectId'] == projectList['projects'][i]['projectId']\n for i in range(len(projectList['projects']))))", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()" ]
[ "0.8128038", "0.80633974", "0.8028948", "0.78569174", "0.78183025", "0.7728668", "0.76615155", "0.7630471", "0.76140267", "0.76140267", "0.7583802", "0.75670856", "0.75328547", "0.74872017", "0.7375651", "0.7375104", "0.7346846", "0.7337656", "0.7324851", "0.73165905", "0.7305317", "0.72964025", "0.7267763", "0.7258648", "0.7213935", "0.7125384", "0.7116282", "0.70950204", "0.706899", "0.7000849", "0.69945395", "0.69926566", "0.6986645", "0.69539607", "0.6940761", "0.6912442", "0.6906145", "0.68681186", "0.6781119", "0.6760477", "0.672421", "0.6641833", "0.6641625", "0.6600566", "0.659823", "0.6588102", "0.65876395", "0.6575108", "0.65713507", "0.646392", "0.6437244", "0.6382265", "0.6318649", "0.6315186", "0.6313249", "0.6301197", "0.6264303", "0.6234369", "0.6231617", "0.6203759", "0.6194927", "0.6164004", "0.61594623", "0.6148683", "0.6122723", "0.60906553", "0.6069815", "0.6045421", "0.6026991", "0.6014933", "0.59974253", "0.59901166", "0.5956138", "0.5945484", "0.5943498", "0.59382206", "0.5935798", "0.590966", "0.587238", "0.587045", "0.58671933", "0.5863532", "0.58536106", "0.58473176", "0.5835139", "0.5835139", "0.58201915", "0.58050674", "0.5803282", "0.5803265", "0.5790737", "0.57829326", "0.57829326", "0.5764744", "0.57596505", "0.5751724", "0.5710703", "0.5706264", "0.5700631", "0.56915003" ]
0.76985204
6
Static method for delete all projects.
def delete_all_projects():
    client = RequestManager()
    client.set_method("GET")
    client.set_endpoint("/projects")
    response = client.execute_request()
    for project in response.json():
        try:
            ProjectHelper.delete_project(project["id"])
        except TypeError:
            LOGGER.info(project)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n for project in Project.objects:\n project.delete()", "def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()", "def clean_project(self, app_name=None, delete_all=False):\n\n if not app_name and not delete_all:\n ConuException(\"You need to specify either app_name or set delete_all=True\")\n\n if delete_all:\n args = [\"--all\"]\n logger.info('Deleting all objects in current project')\n else:\n args = \"-l app=%s\" % app_name\n logger.info('Deleting all objects with label app=%s', app_name)\n\n try:\n o = run_cmd(self._oc_command([\"delete\", \"all\", args]),\n return_output=True)\n o_lines = o.split('\\n')\n for line in o_lines:\n logger.info(line)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cleanup failed because of exception: %s\" % ex)", "def clear_all(delete_id):\n Tasks.query.filter(Tasks.project_id == delete_id).delete()\n Projects.query.filter(Projects.project_id == delete_id).delete()\n db.session.commit()\n\n return redirect('/')", "def clean(self):\n\n if not self.__projects:\n return\n\n Console.info(\"Cleaning session...\")\n Console.indent()\n\n for project in self.__projects:\n project.clean()\n\n path = os.path.abspath(os.path.join(\".jasy\", \"locale\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up locale project...\")\n shutil.rmtree(path)\n\n path = os.path.abspath(os.path.join(\".jasy\", \"virtual\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up virtual project...\")\n shutil.rmtree(path)\n\n Console.outdent()", "def delete_project(arn=None):\n pass", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def tearDown(self):\n Project.objects.all().delete()", "def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()", "def delete_orphan_project(apps, schema_editor):\n Project = apps.get_model('data_manager', 'Project')\n Project.objects.filter(dataset__isnull=True).delete()\n return", "def tearDownClass(cls):\n projects = ['arc_project_for_testing_delete_after_usage1', 'arc_project_for_testing_delete_after_usage2',\n 'ar c', 'ar:c', 'ar<c', 'ar%c']\n for project in projects:\n project_directory = os.path.join(arc_path, 'Projects', project)\n shutil.rmtree(project_directory)", "def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n 
self.assertEqual(Group.objects.count(), 0)", "def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True", "def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response", "def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()", "def delete_all(self):\n raise NotImplementedError()", "def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))", "def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()", "def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)", "def delete_project_files(self, project, logStat):\n from corrdb.common.models import FileModel\n from corrdb.common.models import EnvironmentModel\n\n for _file in project.resources:\n file_ = FileModel.objects.with_id(_file)\n if file_:\n result = self.storage_delete_file(file_.group, file_.storage)\n if result:\n logStat(deleted=True, file_obj=file_)\n file_.delete()\n\n for record in project.records:\n result = self.delete_record_files(record, logStat)\n if result:\n logStat(deleted=True, record=record)\n record.delete()\n\n for environment_id in project.history:\n _environment = EnvironmentModel.objects.with_id(environment_id)\n if _environment:\n if _environment.bundle and _environment.bundle.scope == \"local\":\n result = self.storage_delete_file('bundle', _environment.bundle.storage)\n if result:\n # logStat(deleted=True, bundle=_environment.bundle)\n # logStat(deleted=True, environment=_environment)\n _environment.bundle.delete()\n # else:\n # logStat(deleted=True, environment=_environment)\n _environment.delete()", "def delete_all(self, prog:progress=None): 
\n\t\tself.__output_status('Delete all files')\n\t\tif (self.__check_terminated()):\n\t\t\treturn;\t\n\t\tdelete_dir(self.root)\n\t\ttime.sleep(0.3)", "def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")", "def delete_all(self):\n # delete everything\n shutil.rmtree(self.location)", "def destroy(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n project.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Project.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )", "def delete(self):\r\n delete_tracks(self.project, [self])", "def test_remove_project(self):\n pass", "def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)", "def remove_all(lists_id):\n Tasks.query.filter(Tasks.project_id == lists_id).delete()\n db.session.commit()\n\n return redirect('/')", "def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))", "def delete_users(project):\n for user_id in project.user_id.all():\n project.user_id.remove(user_id.pk)\n project.save()", "def tearDown(self):\n User.objects.all().delete()\n Project.objects.all().delete()", "def delete(self, name, project=None):\n qlist = self._list(project)\n key = self._queue(project, name)\n self._db.delete(key)\n self._db.zremrangebyscore(qlist, -1, 1)", "def delete_all(self):\n models.CourseLearningOutcome.objects.all().delete()\n #models.CoreLearningOutcome.objects.all().delete()\n #models.CreditType.objects.all().delete()\n models.Course.objects.all().delete()\n models.DegreeProgram.objects.all().delete()\n models.DPCourseSpecific.objects.all().delete()\n models.DPCourseGeneric.objects.all().delete()\n models.DPCourseSubstituteSpecific.objects.all().delete()\n 
models.DPCourseSubstituteGeneric.objects.all().delete()", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def delete_project_entities(cls, project_id,\n suppress_exception=False,\n session=None):\n cls.db_repo.delete_project_entities(\n project_id, suppress_exception=suppress_exception, session=session)", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def close(self):\n\n if not self.__projects:\n return\n\n Console.debug(\"Closing session...\")\n Console.indent()\n\n for project in self.__projects:\n project.close()\n\n self.__projects = None\n\n Console.outdent()", "def delete_project(project_id):\n \n project = mongo.db.projects\n project.delete_one({'_id': ObjectId(project_id)})\n flash('Your project has been deleted.', 'success')\n return redirect(url_for('projects'))", "def delete_teams_all(self, team_name):\n self.execute(TABELLE['teams']['delete']['all'])", "def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project Deleted Successfully.\",\n }", "def project_clear_files(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n project.clear_project_folder()\n return Response(status=status.HTTP_200_OK)\n else:\n raise PermissionDenied", "def delete_api(self, project_id, api_id, hbase_manager=None):\n\n self._project_service.get_nondeleted_project(project_id)\n\n proj_api_col = \"apis:{0}\".format(api_id)\n project_id = bytes(project_id, self._charset)\n api_id = bytes(api_id, self._charset)\n\n self.get_nondeleted_api(project_id, api_id)\n\n connection = hbase_manager.connection\n api_tbl = connection.table(\"apis\")\n projects_tbl = connection.table(\"projects\")\n\n projects_tbl.delete(project_id, [bytes(proj_api_col, self._charset)])\n api_tbl.put(api_id, {\n b\"attrs:state\": b\"deleted\"\n })", "def cleanup(self):\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n \r\n path = os.path.join(session.home, project.name)\r\n project.work_area(False, True, True, path=path)\r\n \r\n if (result != None):\r\n _logger.info(\"Project found: '%s'\" % result)\r\n role = session.role\r\n co_role = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n session.role = co_role\r\n try:\r\n delResult = result.delete(scope='project_and_subproject_hierarchy')\r\n finally:\r\n session.role = role\r\n ccm.log_result(delResult, ccm.CHECKOUT_LOG_RULES, _logger)", "def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response", "def reset():\n local('cd {{ project_name }} && \\\n rm -rf static && rm -rf gzip && rm -rf build')", "def delete_project(self, name=None, delete_dir=False):\n victim = 
name or self.current\n if victim not in self:\n raise ValueError(\"{} is not a project\".format(victim))\n\n if len(self) == 1:\n raise ValueError(\"Can't delete only remaining project\")\n\n ProjectDataset.delete().where(ProjectDataset.name == victim).execute()\n\n if delete_dir:\n dir_path = self._base_data_dir / safe_filename(victim)\n assert dir_path.is_dir(), \"Can't find project directory\"\n shutil.rmtree(dir_path)\n\n if name is None or name == self.current:\n if \"default\" in self:\n self.set_current(\"default\")\n else:\n self.set_current(next(iter(self)).name)\n return self.current", "def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)", "def unlink(self):\n analytic_accounts_to_delete = self.env['account.analytic.account']\n for project in self:\n if project.analytic_account_id and not project.analytic_account_id.line_ids:\n analytic_accounts_to_delete |= project.analytic_account_id\n result = super(Project, self).unlink()\n analytic_accounts_to_delete.unlink()\n return result", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()", "def delete_project(self, project_id):\n _url = f\"{self.base_url}/projects/{project_id}\"\n self.http_call(\"delete\", _url)\n return", "async def delete_all_games(self):\n all_games = await ex.conn.fetch(\"SELECT gameid FROM blackjack.games\")\n for games in all_games:\n game_id = games[0]\n await self.delete_game(game_id)", "def destroy_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.delete_domain(name)", "def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True", "def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()", "def destroy_all(self) -> None:\n try:\n containers = self.docker.containers.list(\n all=True,\n filters={\n 'label': LABEL_TASK_ID,\n },\n )\n\n for container in containers:\n container.remove(force=True)\n\n except requests.exceptions.ConnectionError:\n raise ProviderError('Docker engine unavailable')", "def delete_all():\n answer = ['YES', 'NO']\n str = rs.GetString(\"Delete all objects?\", 'YES', answer)\n\n if str == 'YES':\n obs = rs.ObjectsByType(0)\n rs.DeleteObjects(obs)\n elif str == 'NO':\n pass\n else:\n sys.exit()", "def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()", "def remove_all_users(request):\n id_project = request.POST.get(\"project_id\")\n project = UtilsData.get_object_by_type_and_id(\"project\", id_project)\n if request.user.can_delete(project):\n roles = project.affecteds_set.all()\n for role in roles:\n if role.role not in (Affecteds.ROLE.Manager, Affecteds.ROLE.Admin):\n role.role = Affecteds.ROLE.Nill\n role.save()\n return HttpResponse(json.dumps(\"Ok\"),\n content_type=\"application/json\")\n else:\n logger.error(\"user %s try to 
remove all users to project %d \" % (request.user.username, id_project))\n return HttpResponse(json.dumps(\"error\"),\n content_type=\"application/json\")", "def pre_project_delete(self, resource_id):\n pass", "def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))", "def delete_namespaced_project(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def delete_all(self):\n self.session.query(TodoItem).delete()\n self.session.query(TodoList).delete()", "def delete_keystone_v3_project(self, project_id, domain_id):\n LOG_OBJ.debug(\"Disable the project.\")\n kwargs = {\"project_id\": project_id, \"enabled\": False}\n self.set_keystone_v3_project(**kwargs)\n\n LOG_OBJ.debug(\"Deleting the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(project_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the project\")\n print (\"No response from Server while deleting the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Deleting project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n 
self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def deleteAll(klass, where=None, transaction=None):\n config = Registry.getConfig()\n tablename = klass.tablename()\n return config.delete(tablename, where, transaction)", "def delete_all_onprogress_domains():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_domains\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def delete_project_by_name(self, project_name):\n with self._transaction.cursor() as cur:\n # delete associations between this project and any barcodes\n cur.execute(\"DELETE FROM barcodes.project_barcode \"\n \"WHERE project_id in (\"\n \"SELECT project_id FROM barcodes.project \"\n \"WHERE project = %s)\",\n (project_name,))\n\n # now delete the project itself\n cur.execute(\"DELETE FROM barcodes.project WHERE project = %s\",\n (project_name,))\n return cur.rowcount == 1", "def Delete(ids):\n db = catocommon.new_conn()\n\n # we have to check each cloud and see if it's used as the default...\n # if so, you can't delete it without first fixing the account.\n # ACCOUNTS REQUIRE A DEFAULT CLOUD\n existed = False\n for delete_id in ids:\n sql = \"select count(*) from cloud_account where default_cloud_id = %s\" % (delete_id)\n exists = db.select_col_noexcep(sql)\n\n if not exists:\n sql = \"delete from clouds_keypair where cloud_id = %s\" % (delete_id)\n db.tran_exec(sql)\n \n sql = \"delete from clouds where cloud_id = %s\" % (delete_id)\n db.tran_exec(sql)\n \n db.tran_commit()\n else:\n existed = True\n \n db.close()\n\n msg = \"\"\n if existed:\n msg = \"Some of the selected Clouds were not deleted because they are referenced by a Cloud Account. 
Delete the Account first, or assign it a new Default Cloud.\"\n \n return True, msg", "def delete(self):\n return self.client._perform_empty(\"DELETE\", \"/workspaces/%s\" % self.workspace_key)", "def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))", "def delete_contributions(self, project, per_page=100, max_workers=5):\n tic = time.perf_counter()\n\n if max_workers > MAX_WORKERS:\n max_workers = MAX_WORKERS\n print(f\"max_workers reset to max {MAX_WORKERS}\")\n\n cids = self.get_contributions(project)[\"ids\"]\n total = len(cids)\n\n if cids:\n with FuturesSession(max_workers=max_workers) as session:\n while cids:\n futures = [\n session.delete(\n f\"{self.url}/contributions/\",\n headers=self.headers,\n params={\n \"project\": project,\n \"id__in\": \",\".join(chunk),\n \"per_page\": per_page,\n },\n )\n for chunk in chunks(cids, n=per_page)\n ]\n\n self._run_futures(futures, total=len(cids))\n cids = self.get_contributions(project)[\"ids\"]\n\n self.load()\n\n # reset columns to be save (sometimes not all are reset BUGFIX?)\n self.projects.update_entry(pk=project, project={\"columns\": []}).result()\n toc = time.perf_counter()\n dt = (toc - tic) / 60\n print(f\"It took {dt:.1f}min to delete {total} contributions.\")", "def update_projects(self, new_projects_list):\n to_stop = [project for project in self if project not in new_projects_list]\n for project_id in to_stop:\n self[project_id].stop()\n del self[project_id]\n\n for project_id in new_projects_list:\n if project_id not in self:\n self[project_id] = ProjectManager(self.zk_client, project_id,\n self.callback)", "def purge_project(request):\n data = json.loads(request.body.decode('utf-8'))\n if 'item_id' not in data or not data['item_id']:\n return JsonResponse({'state': 'error', 'error': 'No project specified for reactivating'}, status=400)\n projquery = models.Project.objects.filter(pk=data['item_id'], active=False)\n if not projquery:\n return JsonResponse({'state': 'error', 'error': 'Project does not exist or is still active'}, status=403)\n dsetowners = models.DatasetOwner.objects.filter(dataset__runname__experiment__project_id=data['item_id'], dataset__purged=False).select_related('dataset')\n if not request.user.is_staff:\n return JsonResponse({'state': 'error', 'error': 'User has no permission to purge this project, must be staff'}, status=403)\n result = {'errormsgs': []}\n for dso in dsetowners.distinct('dataset'):\n purged = delete_dataset_from_cold(dso.dataset)\n if purged['state'] == 'error':\n result.update({'state': 'error', 'error': 'Not all project datasets could be purged'})\n result['errormsgs'].append(purged['error'])\n # if any dataset cannot be purged, report it, do not mark proj as purged\n if result['errormsgs']:\n result['error'] = '{} Errors: {}'.format(result['error'], '; '.join(result.pop('errormsgs')))\n return JsonResponse(result, status=500)\n else:\n projquery.update(active=False)\n return JsonResponse({})", "def resources(request):\n projects, secrets, pools, storageclasses, pvcs, pods = ([] for i in range(6))\n\n def finalizer():\n \"\"\"\n Delete the resources created during the test\n \"\"\"\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n 
project.wait_for_delete(project.namespace)\n\n request.addfinalizer(finalizer)\n\n return projects, secrets, pools, storageclasses, pvcs, pods", "def delete_all(submission_client, program, project, batch_size=200, types=['submitted_methylation', 'aliquot', 'sample', 'demographic', 'case', 'experiment']):\n for t in types:\n print('{}-{}.{}'.format(program, project, t))\n try:\n delete_type(submission_client, program, project, batch_size, t)\n except Exception as e:\n print(e)", "def delete(self):\n for i in set(self.instances.values()):\n i.delete()\n shutil.rmtree(self.dirpath, True)", "def delete_all_domain_pages():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM domain_pages\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def DeleteServices(self):\n for service in self.services.values():\n service.Delete()", "def delete_project(self, project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_delete.click()\n\n page_projects.form_delete_project_confirm.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_absence()", "def post_project_delete(self, resource_id, resource_dict):\n pass", "async def clear_all(self) -> None:", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "def delete_all(sid):\n Game.objects.all().delete()", "def DeleteAll(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)" ]
[ "0.7438387", "0.74334717", "0.7418019", "0.6865698", "0.6836307", "0.6812033", "0.6775198", "0.6739514", "0.6739514", "0.6619207", "0.6606535", "0.65799767", "0.65798426", "0.65713173", "0.6563861", "0.6534334", "0.64856863", "0.6476536", "0.6473611", "0.6437883", "0.6420292", "0.6313049", "0.62967867", "0.62856716", "0.62752104", "0.6199454", "0.6188569", "0.61832356", "0.6182946", "0.61698896", "0.6166517", "0.61383075", "0.6132611", "0.61003214", "0.60853916", "0.6070335", "0.6067106", "0.6063806", "0.6057775", "0.60422105", "0.60123265", "0.5980971", "0.5917976", "0.5909036", "0.58866143", "0.58822", "0.58793515", "0.587845", "0.58618027", "0.5849153", "0.5843611", "0.583917", "0.58360976", "0.5828697", "0.581492", "0.58137405", "0.58069944", "0.5786529", "0.5766742", "0.5762417", "0.5739808", "0.57331496", "0.5728839", "0.57157767", "0.57091016", "0.5707375", "0.5704416", "0.5701312", "0.5699013", "0.56961083", "0.5686833", "0.56839", "0.56755704", "0.56638396", "0.5655906", "0.56262493", "0.5622429", "0.5622028", "0.56012005", "0.55888313", "0.55857366", "0.55827016", "0.55722743", "0.55707765", "0.5570158", "0.5568325", "0.55676097", "0.5564349", "0.555637", "0.5554291", "0.5550858", "0.5550755", "0.5546927", "0.55461496", "0.5543163", "0.5541949", "0.55288345", "0.5527618", "0.5519352", "0.5515107" ]
0.8597782
0
Static method who read all projects
def read_project(response): STORED_ID['project_id'] = response.json()["id"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def project():", "def project():", "def project():", "def test_get_projects(self):\n pass", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def get_projects(self):\n return conf.projects", "def test_list_project(self):\n pass", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def get_projects():\n return Project.query.all()", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def test_get_project(self):\n pass", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def test_get_projects_expanded(self):\n pass", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def projects(self):\r\n return p.Projects(self)", "def test_list_project_request(self):\n pass", "def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()", "def atlas_projects():\n pass", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def get_project_list(config):\n eggs_dir = config.get('eggs_dir', 'eggs')\n if os.path.exists(eggs_dir):\n projects = os.listdir(eggs_dir)\n else:\n projects = []\n try:\n projects += [x[0] for x in config.cp.items('settings')]\n except NoSectionError:\n pass\n return projects", "def project(self):\n return read_small_file(self.homeDirectory + \"/.project\")", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n 
method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def get_all_projects():\n return jsonify(admin.get_all_projects(current_app.scoped_session()))", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def _loadProjects(self):\n logger.debug(\"Func: _loadProjects\")\n\n if not os.path.isfile(self._pathsDict[\"projectsFile\"]):\n return\n else:\n projectsData = self._loadJson(self._pathsDict[\"projectsFile\"])\n if projectsData == -2:\n return -2\n return projectsData", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def all_projects(self):\n projects_list = []\n for path in DAVOS_PROJECT_DIR.iterdir():\n if path.is_dir():\n projects_list.append(Project(path.name))\n return projects_list", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])", "def project_all(request, format=None):\n if request.method == 'GET':\n projects = Project.objects.all().order_by('key')\n serializer = ProjectSerializer(projects, many=True)\n return Response(serializer.data)", "def get_project(self, i):\r\n return self.__projects[i]", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))", "def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def list_projects(arn=None, nextToken=None):\n pass", "def _get_project_tina_entries(self,pool='archive',refresh=False,path_folder=None):\n\t\tif not path_folder: path_folder = self.catalog_path\n\t\tif not 
refresh:\n\t\t\ttry:\n\t\t\t\treturn self.tina_archive_entries\n\t\t\texcept: pass \n\t\tself.tina_archive_entries = Tina.tina_find(\n\t\t\tpath_folder=path_folder,\n\t\t\tapplication=self.application,\n\t\t\tstrat='A',\n\t\t\tskip_filter=self.skip_filter)\n\t\treturn self.tina_archive_entries", "def test_demo_project_call(self):\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_project_names()\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'romainducarrouge-31f2' in resp", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "async def get_project_info(project_urls):\n project_info = []\n for url in project_urls:\n soup = await get_page(url)\n about = soup.find_all(\"p\")\n title = soup.find(\"h3\").text\n student = about[0].text.splitlines()[2].strip()\n details = about[1].text\n name = about[0].find(\"a\").text\n project_info.append({'Organization': name, 'title': title,\n 'student': student, 'details': details,\n 'link': url})\n\n return project_info", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def _project_files(project_name, folder):\n _authenticate()\n if project_name.startswith(\"project-\"):\n project_id = project_name\n else:\n query = dxpy.api.system_find_projects({\"name\": project_name, \"level\": \"VIEW\"})\n if len(query[\"results\"]) == 1:\n project_id = query[\"results\"][0][\"id\"]\n else:\n raise ValueError(\"Did not find DNAnexus project %s: %s\" % (project_name, query))\n dx_proj = dxpy.get_handler(project_id)\n return _recursive_ls(dx_proj, project_name, folder)", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def getProjects(self):\n\n return self.__projects", "def update_projects(self):\n self._read_directory()\n print(self._filenames)\n for filename in self._filenames:\n project = self._make_project(self._read_project(filename))\n self.projects.append(\n (int(project.get_id()), project)\n )\n self.projects = sorted(self.projects, reverse=True)", "def get_projects(self, source=\"all\"):\n self.projects = []\n self._project_indices_by_id = {}\n self._project_indices_by_name = {}\n\n if self.hub_type == self.NAMESPACES[\"a.\"]:\n if not self.auth.three_legged:\n self.logger.warning(\n \"Failed to get projects. 
'{}' hubs only supports 3-legged access token.\".format( # noqa:E501\n self.NAMESPACES[\"a.\"]\n )\n )\n else:\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n elif self.hub_type == self.NAMESPACES[\"b.\"]:\n\n if source.lower() in (\"all\", \"docs\"):\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n if (\n source.lower() in (\"all\", \"admin\")\n and not self.auth.three_legged\n ):\n\n for project in self.api.hq.get_projects():\n if project[\"id\"] in self._project_indices_by_id:\n self.projects[\n self._project_indices_by_id[project[\"id\"]]\n ].data = project\n else:\n self.projects.append(\n Project(\n project[\"name\"],\n project[\"id\"],\n data=project,\n app=self,\n )\n )\n self._project_indices_by_id[project[\"id\"]] = (\n len(self.projects) - 1\n )\n\n self._project_indices_by_name[project[\"name\"]] = (\n len(self.projects) - 1\n )\n\n elif source.lower() in (\"all\", \"admin\"):\n self.logger.debug(\n \"Failed to get projects. The BIM 360 API only supports 2-legged access tokens\" # noqa:E501\n )", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def _get_resource_projects(resource):\n resource_type = resource.get('type', '').upper()\n resource_values = resource.get('include', tuple())\n\n projects = tuple()\n if resource_type == _FOLDER:\n projects = _get_folder_projects(resource_values)\n elif resource_type == _PROJECT:\n projects = _get_projects(resource_values)\n elif resource_type == _FILTER:\n projects = _get_filtered_projects(resource_values)\n else:\n logging.info('Projects: No projects for resource %s', resource_type)\n return projects", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def list_projects(self) -> List['RadsProject']:\n ret = []\n base = self.fspath(\"projects\")\n for name in os.listdir(base):\n if os.path.isdir(f\"{base}/{name}/releases\"):\n ret.append(RadsProject(self, name))\n return ret", "def get_projects(self):\n return self.jira.projects()", "def 
load_project_list_from_file(self):\n # cycle through the files and append them converted from json to the list\n out = []\n\n path = self.data_path + self.project_dir\n\n # check if the data_path/clients directory exists and cancel otherwise\n if not os.path.isdir(str(path)):\n return []\n\n for file in sorted(os.listdir(path)):\n if file.endswith('.flproject'):\n # load the file\n f = open(path + '/' + file, 'r')\n load = f.read()\n f.close()\n\n # generate main object\n out.append(Project().from_json(js=load))\n\n return out", "def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None", "def get_project(self, project):\n return Dict(self.projects.get_entry(pk=project, _fields=[\"_all\"]).result())", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def scan_all_projects():\n from projectscanner import ProjectScanner\n #ensure that new items get scanned by setting their last scan time to this.\n needs_scan_time = datetime.now() - timedelta(days=30)\n\n s = ProjectScanner()\n for projectinfo in s.scan_all():\n projectinfo.save_receipt(needs_scan_time)", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def getAllVcProj (self, inDEV):\n result = []\n def filterVcProj (list, dirname, names):\n for name in names:\n if name [-7:] == '.vcproj':\n fullpath = os.path.join (dirname, name)\n list.append (fullpath)\n os.path.walk (inDEV, filterVcProj, result)\n result = filter (self.isValidPattern, result)\n return result", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def query_project(self):\n\n # Find stylesheets.\n found = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found = True\n print(filename)\n if not found:\n print(\"not found!\")", "def find_projects(self):\n\n attrs = ['name', 'remote', 'revision', 'path', 'groups', 'upstream']\n projects = defaultdict(list)\n\n for project in self.tree.findall('project'):\n 
values = [project.get(attr) for attr in attrs]\n project_dict = dict(zip(attrs, values))\n project_name = project_dict.pop('name')\n\n if project_dict['groups'] is not None:\n project_dict['groups'] = project_dict['groups'].split(',')\n\n if project_name is None:\n if self.fail_on_invalid:\n raise InvalidManifest(\n 'Project entry missing \"name\" attribute'\n )\n else:\n continue\n\n if project_name in projects:\n paths = [\n p_attr.get('path', project_name)\n for name, p_attrs in projects.items()\n for p_attr in p_attrs if name == project_name\n ]\n\n if project_dict['path'] in paths:\n raise InvalidManifest(\n 'Duplicate project entry with matching \"name\" '\n 'and \"path\" attributes'\n )\n\n children = project.getchildren()\n if children:\n for child in children:\n subelement = child.tag\n\n # Only run the following if the element tag\n # is a string (avoids comments)\n if isinstance(subelement, str):\n subdict = getattr(\n self, 'create_{}_dict'.format(subelement)\n )(child)\n\n if subdict is not None:\n project_dict.setdefault(subelement, []).append(\n subdict\n )\n\n projects[project_name].append(\n self.generate_data_dict(project_dict)\n )\n\n self.projects = projects", "def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList", "def index(self):\n return {'projects': [p for p in self.server.projects.values()]}", "def project_show(ctx, args):\n for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def projects_settings():\n return map_settings(settings_repository.settings)", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. 
load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def _read_directory(self):\n self._filenames = glob.glob(self._directory + \"/*.project\")", "def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]", "def test_findTwistedStyleProjects(self):\n baseDirectory = self.makeProjects((\"foo\", 2, 3, 0), (\"foo.bar\", 0, 7, 4))\n projects = findTwistedProjects(baseDirectory)\n self.assertProjectsEqual(\n projects,\n [\n Project(baseDirectory.child(\"foo\")),\n Project(baseDirectory.child(\"foo\").child(\"bar\")),\n ],\n )", "def project(self, request):\n return self._project(request, 'project')", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def parse_one_project(self, args, project_arg):\n project = self.linguist_worktree.get_linguist_project(project_arg, raises=True)\n return [project]", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def test_findTwistedStyleProjects(self):\n baseDirectory = self.makeProjects(\n Version('foo', 2, 3, 0), Version('foo.bar', 0, 7, 4))\n projects = findTwistedProjects(baseDirectory)\n self.assertProjectsEqual(\n projects,\n [Project(baseDirectory.child('foo')),\n Project(baseDirectory.child('foo').child('bar'))])", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def get_project(arn=None):\n pass", "def test_project_list(self):\n rv = self.app.get(\"/\")\n self.assertIn(\"Assignment0\", rv.data)\n self.assertIn(\"Assignment1.0\", rv.data)\n self.assertIn(\"Assignment2.0\", rv.data)", "def read_all(self):\r\n pass" ]
[ "0.7262337", "0.7262337", "0.7255745", "0.7255745", "0.7255745", "0.7112538", "0.7085046", "0.7018626", "0.70128983", "0.6937012", "0.6859167", "0.67931646", "0.67931646", "0.67831695", "0.6778349", "0.6751696", "0.6723736", "0.67085075", "0.67051655", "0.66691184", "0.66677755", "0.66299576", "0.65674734", "0.65627205", "0.65493214", "0.6533076", "0.64972013", "0.64756566", "0.6430738", "0.6412717", "0.6397353", "0.6368578", "0.6351308", "0.63415426", "0.63260525", "0.63130385", "0.63127124", "0.6309737", "0.6306162", "0.63023627", "0.6293193", "0.62898564", "0.628373", "0.6240411", "0.6226287", "0.6211344", "0.6205491", "0.6202546", "0.61943465", "0.6186949", "0.6164675", "0.6157014", "0.61496836", "0.61486226", "0.6142417", "0.6120459", "0.61026466", "0.6093286", "0.607034", "0.6055063", "0.60542786", "0.605276", "0.6047426", "0.6046009", "0.60450107", "0.60331094", "0.6029376", "0.6014959", "0.6012668", "0.6011489", "0.60057104", "0.6003364", "0.6003353", "0.5978157", "0.5977512", "0.5975524", "0.59740865", "0.5969523", "0.59671205", "0.5960949", "0.5955031", "0.594825", "0.5945483", "0.59451365", "0.5934162", "0.59289324", "0.5926126", "0.5924337", "0.59067947", "0.5898985", "0.58954567", "0.5876392", "0.58754516", "0.5874488", "0.58735293", "0.58688724", "0.58623403", "0.58619994", "0.5859464", "0.58594143", "0.58534926" ]
0.0
-1
Static method for delete a project.
def delete_stored_project(): client = RequestManager() client.set_method("DELETE") client.set_endpoint("/projects/{0}".format(STORED_ID['project_id'])) client.execute_request()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def delete_project(arn=None):\n pass", "def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)", "def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response", "def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None", "def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()", "def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)", "def delete_project(request, project_id):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(Project, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('portfolio'))", "def test_delete_project(self):\n pass", "def test_delete_project(self):\n pass", "def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response", "def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True", "def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))", "def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project Deleted 
Successfully.\",\n }", "def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))", "def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()", "def destroy(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n project.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Project.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def delete_project(self, project_id):\n _url = f\"{self.base_url}/projects/{project_id}\"\n self.http_call(\"delete\", _url)\n return", "def delete_project(project_id):\n \n project = mongo.db.projects\n project.delete_one({'_id': ObjectId(project_id)})\n flash('Your project has been deleted.', 'success')\n return redirect(url_for('projects'))", "def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))", "def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True", "def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete_project(proj_id):\n project_obj = Project.objects.get(id=proj_id)\n print('Deleting project the fastq files within the project: ', project_obj.description)\n\n description = project_obj.description.replace(' ', '') # remove any space in the project name\n project_dir = 'documents/%s/%s' % (str(project_obj.date.date()), description)\n shutil.rmtree(project_dir, ignore_errors=True)\n print(\"Files deleted.\")", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def delete_project(self, project_name, check=True):\n page_projects = self._page_projects()\n\n with page_projects.table_projects.row(\n name=project_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_delete.click()\n\n page_projects.form_delete_project_confirm.submit()\n\n if check:\n self.close_notification('success')\n page_projects.table_projects.row(\n name=project_name).wait_for_absence()", "def 
delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet, id=id)\n\n # Check if the logged in user is allowed to delete this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Eventually delete the project\n project.delete()\n\n return redirect(\"projects\")", "def delete_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/' + str(project_id))\n else:\n if request.method == \"POST\":\n if project:\n project.delete()\n return HttpResponseRedirect('/projects/')\n else:\n return render(request, 'projects/delete_project.html',\n {'project': project})\n return render(request, 'projects/delete_project.html', {'project': project})", "def delete_project(self, name=None, delete_dir=False):\n victim = name or self.current\n if victim not in self:\n raise ValueError(\"{} is not a project\".format(victim))\n\n if len(self) == 1:\n raise ValueError(\"Can't delete only remaining project\")\n\n ProjectDataset.delete().where(ProjectDataset.name == victim).execute()\n\n if delete_dir:\n dir_path = self._base_data_dir / safe_filename(victim)\n assert dir_path.is_dir(), \"Can't find project directory\"\n shutil.rmtree(dir_path)\n\n if name is None or name == self.current:\n if \"default\" in self:\n self.set_current(\"default\")\n else:\n self.set_current(next(iter(self)).name)\n return self.current", "def post_project_delete(self, resource_id, resource_dict):\n pass", "def delete(self, project_id):\n try:\n authenticated_user_id = token_auth.current_user()\n if not ProjectAdminService.is_user_action_permitted_on_project(\n authenticated_user_id, project_id\n ):\n raise ValueError()\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n try:\n ProjectAdminService.delete_project(project_id, authenticated_user_id)\n return {\"Success\": \"Project deleted\"}, 200\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403", "def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )", "def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))", "def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)", "def delete_remote_project(profile, project):\n return delete_remote_project_worker.delay(profile_id=profile.id,\n project_id=project.id)", "def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n 
try:\n project.delete()\n self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def delete_keystone_v3_project(self, project_id, domain_id):\n LOG_OBJ.debug(\"Disable the project.\")\n kwargs = {\"project_id\": project_id, \"enabled\": False}\n self.set_keystone_v3_project(**kwargs)\n\n LOG_OBJ.debug(\"Deleting the project.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects/\" + \\\n str(project_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"DELETE\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while deleting the project\")\n print (\"No response from Server while deleting the project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Deleting project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Deleting project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True", "def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()", "def test_remove_project(self):\n pass", "def delete_project(self):\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n task_status = tasks[self.tasks_view.currentRow()].Status\n\n if task_status is 0:\n warning = Warning(\n \"<html><head/><body><p align=\\\"center\\\"><span style=\\\" font-weight:600;\\\">\"\n \"Unable delete Task. \"\n \"Make sure the Task is Done\"\n \"</span></p></body></html>\"\n )\n warning.exec_()\n else:\n self.tasks_flow.delete_task(task_id)\n self.write_tasks_table()", "def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)", "def project_delete_event(self, proj_info):\n\n LOG.debug(\"Processing project_delete_event...\")\n proj_id = proj_info.get('resource_info')\n proj_name = self.get_project_name(proj_id)\n if proj_name:\n try:\n self.dcnm_client.delete_project(proj_name,\n self.cfg.dcnm.\n default_partition_name)\n except dexc.DfaClientRequestFailed:\n # Failed to delete project in DCNM.\n # Save the info and mark it as failure and retry it later.\n LOG.error(_LE(\"Failed to create project %s on DCNM.\"),\n proj_name)\n self.update_project_info_cache(proj_id, name=proj_name,\n opcode='delete',\n result=constants.DELETE_FAIL)\n else:\n self.update_project_info_cache(proj_id, opcode='delete')\n LOG.debug('Deleted project:%s', proj_name)\n self.project_delete_notif(proj_id, proj_name)", "def pre_project_delete(self, resource_id):\n pass", "def delete(self, team_id, project_id):\n try:\n if not ProjectAdminService.is_user_action_permitted_on_project(\n token_auth.current_user, project_id\n ):\n raise ValueError()\n TeamService.delete_team_project(team_id, project_id)\n return {\"Success\": True}, 200\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403", 
"def delete(self, name, project=None):\n qlist = self._list(project)\n key = self._queue(project, name)\n self._db.delete(key)\n self._db.zremrangebyscore(qlist, -1, 1)", "def delete_namespaced_project(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_namespaced_project\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `delete_namespaced_project`\")\n\n resource_path = '/oapi/v1/projects/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UnversionedStatus',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()", "def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if not task == None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"deleteTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': self.request.params['UUID'], 'action': \"deleteTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n task.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")", "def delete_api(self, project_id, api_id, 
hbase_manager=None):\n\n self._project_service.get_nondeleted_project(project_id)\n\n proj_api_col = \"apis:{0}\".format(api_id)\n project_id = bytes(project_id, self._charset)\n api_id = bytes(api_id, self._charset)\n\n self.get_nondeleted_api(project_id, api_id)\n\n connection = hbase_manager.connection\n api_tbl = connection.table(\"apis\")\n projects_tbl = connection.table(\"projects\")\n\n projects_tbl.delete(project_id, [bytes(proj_api_col, self._charset)])\n api_tbl.put(api_id, {\n b\"attrs:state\": b\"deleted\"\n })", "def delete(self, project_id):\n project_model = ProjectDBModel.query.get(project_id)\n if not project_model:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n try:\n data = request.get_json()\n # cambiarlo cuando se vuelva a tener dos PKs\n deleted = FavoritesProjectDBModel.delete(\n data['user_id'], project_id)\n if deleted:\n users = \\\n FavoritesProjectDBModel.get_favorites_of_project_id(\n project_id)\n response_object = {\n \"project_id\": project_id,\n \"users_id\": users,\n }\n return response_object, 200\n else:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n except KeyError:\n ns.abort(404, status=MISSING_VALUES_ERROR)", "def delete_project_file(self, project=None):\n if type(project) is not Project:\n return False\n\n path = self.data_path + self.project_dir\n\n # generate filenames\n filename = path + '/' + self.us(project.project_id()) + '.flproject'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False", "async def delete(self, ctx, project_name: str) -> None:\n if not ctx.projects.find_project(project_name):\n channel = discord.utils.get(\n ctx.guild.channels, name=f\"{project_name}-project\")\n\n if channel and channel.category.name == \"Flux Projects\":\n if ctx.author.permissions_in(channel).manage_channels:\n message = await ctx.send(\"That project doesn't appear to\"\n \" exist in my database, but the \"\n \"channel still exists. \"\n \"Would you like to delete it?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\",\n check=lambda reaction, user: (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n await channel.delete(reason=\"Project not found.\")\n await ctx.send(\"The channel was deleted sucessfully.\")\n return\n\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the channel.\")\n return\n\n else: # If author doesn't have access to deleting channels.\n await ctx.send(\"That project does not appear to be in my \"\n \"database, but the channel for it still \"\n \"exists. Please have someone with\"\n \" manage channels run this chommand.\"\n )\n return\n else:\n await ctx.send(\"I could not find this project.\")\n return\n\n if str(ctx.author.id) != ctx.projects.find_project(project_name).get(\n \"owner\"):\n await ctx.send(\"Only the project owner \"\n \"can delete this project.\")\n return\n message = await ctx.send(\"This action __cannot__ be undone. \"\n \"Once you do this, everything is gone. 
\"\n \"Are you sure you want to continue?\")\n yes = \"<:greenTick:596576670815879169>\"\n no = \"<:redTick:596576672149667840>\"\n await message.add_reaction(yes)\n await message.add_reaction(no)\n reaction, user = await ctx.bot.wait_for(\n \"reaction_add\", check=lambda reaction, user:\n (user == ctx.author) and\n (str(reaction.emoji) == yes or no) and\n (reaction.message.channel == ctx.channel)\n )\n if reaction.emoji.id == ctx.bot.config.tick_yes:\n channel = ctx.projects.find_project(\n project_name).get(\"channel\")\n channel = discord.utils.get(ctx.guild.channels,\n id=int(channel))\n ctx.projects.delete_project(project_name)\n if channel:\n await channel.delete(reason=\"Project deleted.\")\n await ctx.send(\"The project has been deleted.\")\n elif reaction.emoji.id == ctx.bot.config.tick_no:\n await ctx.send(\"Not deleting the project.\")", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/project-folders/%s\" % self.project_folder_id)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def project_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)", "def remove_project(project_id):\n response_object = {'status': 'success'}\n with database.engine.begin() as connection:\n\n stmt = select([models.projects.c.path]).where(\n models.projects.c.project_id == project_id)\n project = connection.execute(stmt).first()\n\n if project:\n app = flask.current_app\n project_path = os.path.join(\n app.root_path, app.config['DATA_DIRECTORY'], project['path'])\n if 'morphocut' in project_path and app.config['DATA_DIRECTORY'] in project_path:\n print('removing project with id {}'.format(project_id))\n if os.path.exists(project_path):\n helpers.remove_directory(project_path)\n\n stmt = models.projects.delete().where( # pylint: disable=no-value-for-parameter\n models.projects.c.project_id == project_id)\n\n connection.execute(stmt)\n\n return jsonify(response_object)", "def delete_project_by_name(self, project_name):\n with self._transaction.cursor() as cur:\n # delete associations between this project and any barcodes\n cur.execute(\"DELETE FROM barcodes.project_barcode \"\n \"WHERE project_id in (\"\n \"SELECT project_id FROM barcodes.project \"\n \"WHERE project = %s)\",\n (project_name,))\n\n # now delete the project itself\n cur.execute(\"DELETE FROM barcodes.project WHERE project = %s\",\n (project_name,))\n return cur.rowcount == 1", "def _delete_dm_project(self, project_uuid: str) -> None:\n _LOGGER.warning('Deleting DM Project %s...', project_uuid)\n\n dm_rv: DmApiRv = DmApi.delete_project(self.__org_owner_dm_token,\n project_id=project_uuid)\n if not dm_rv.success:\n _LOGGER.error('Failed to delete DM Project %s', project_uuid)\n return\n\n _LOGGER.warning('Deleted DM Project %s', project_uuid)", "def delete(project, zone, instance):\n print >>sys.stderr, 'WARNING: duplicated jobs may fail/corrupt results'\n print >>sys.stderr, ('TODO(fejta): See http://stackoverflow.com/'\n 'questions/19645430/changing-jenkins-build-number')\n answer = raw_input('Delete %s [yes/NO]: ')\n if not answer or answer != 'yes':\n print >>sys.stderr, 'aborting'\n sys.exit(1)\n gcloud(\n project,\n 'compute',\n 'instances',\n 'delete',\n '--zone=%s' % zone,\n instance,\n )\n gcloud(\n project,\n 'compute',\n 'disks',\n 'delete',\n '--zone=%s' % zone,\n *get_disks(instance))", "def delete_orphan_project(apps, schema_editor):\n Project = apps.get_model('data_manager', 'Project')\n Project.objects.filter(dataset__isnull=True).delete()\n return", "def delete(ctx: 
click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def delete_project_quotas(self, project_id, extra_headers=None,\n use_auth=True, user_name=None):\n resp = self.client.delete('project-quotas/' + project_id,\n extra_headers=extra_headers,\n use_auth=use_auth, user_name=user_name)\n return resp", "def DelProject(projname):\n\tif projname == \"\" or projname == None:\n\t\tpjnm = raw_input(\"\\nNombre del proyecto: \").lower()\n\t\tif pjnm == \"\" or pjnm == None:\n\t\t\tcancel()\n\telse:\n\t\t# Proceso para borrar todo el proyecto\n\t\tpass\n\n\tpa = open(\"author_name.txt\", \"r\")\t#Abre el archivo con el nombre del autor\n\tpa.read()\n\tpc = open(\"project_code.txt\", \"r\")\t#Abre el archivo con el codigo de proyecto\n\tpc.read()\n\n\tuserpa = raw_input(\"Ingrese el nombre del autor: \").lower()\n\tuserpc = raw_input(\"Ingrese el codigo del proyecto: \").lower()\n\n\tif userpa == pa and userpc == pc:\t#Se verifica que userpa(nombre del autor por el usuario) sea igual a pa(nombre original del autor) y lo mismo con el codigo del proyecto\n\t\tprint \"Iniciando el Borrado del Proyecto...\"\n\t\tpcommands.del_project()\n\t\tprint \"El proyecto se ha borrado con exito!\"\n\telse:\n\t\tprint \"El codigo del proyecto o el nombre del autor no es correcto.\"\n\t\tcancel()", "def delete_all_projects():\n client = RequestManager()\n client.set_method(\"GET\")\n client.set_endpoint(\"/projects\")\n response = client.execute_request()\n for project in response.json():\n try:\n ProjectHelper.delete_project(project[\"id\"])\n except TypeError:\n LOGGER.info(project)", "def delete(self, endpoint, params=None):\n params = params or dict()\n return self.request(verb=requests.delete, address=self.project_address + endpoint,\n params=params)", "def delete(self):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s\" % (self.project_key, self.odb_id))", "def command_delete(self):\n if self.gcp_env.project not in SUPPORTED_PROJECT_CONFIGS.keys():\n _logger.error(f'Project config not supported {self.gcp_env.project}')\n return 1\n\n if not self.args.bucket and not self.args.id:\n _logger.error(\"--bucket and --id required for delete.\")\n return 1\n\n # Get notification\n client = storage.Client()\n bucket = client.get_bucket(self.args.bucket)\n target = bucket.get_notification(self.args.id, client)\n\n if self.gcp_env.project != target.topic_project:\n _logger.error(\"Notification project and specified project do not match.\")\n return 1\n\n # Delete the notification\n try:\n target.delete(client=client)\n\n except NotFound:\n _logger.error(f\"Notification ID {self.args.id} not found.\")\n return 1\n\n _logger.info(f\"Notification id {self.args.id} has been deleted.\")\n\n _logger.info(\"Removing notification from config...\")\n self.delete_notification_from_config()\n\n return 0", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def test_projects_id_contacts_delete(self):\n project = Contact()\n response = self.client.open('/project-tracker/projects/{id}/contacts'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project 
name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "def delete():", "def delete(self, application_id):", "def _on_del_project(self):\n project = self.ddnCurProject.get()\n# if len(project) > 0:\n if project:\n if '.prj'!= project[-4:]:\n project += '.prj'\n if os.path.exists(self.BibTerm + '/'+ project):\n os.remove(self.BibTerm + '/'+ project)\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n# if len(self.list_projects) > 0:\n if self.list_projects:\n self.ddnCurProject.set(self.list_projects[0])\n else:\n self.ddnCurProject.set('')\n pass", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "def delete_language_from_project(self, project_id, language_code):\n self._run(\n url_path=\"languages/delete\",\n id=project_id,\n language=language_code\n )\n return True", "def record_destroy_for_project(project_id):\n session = get_session()\n with session.begin():\n session.query(models.ProjectAccountRecord).\\\n filter_by(project_id=project_id).\\\n update({'deleted': True,\n 'deleted_at': datetime.datetime.utcnow(),\n 'updated_at': datetime.datetime.utcnow()})", "def remove_deleted_project(self, project_id):\n self.db.make_query(\n '''\n DELETE FROM deleted_project WHERE project_id = \"{}\";\n '''.format(project_id)\n )\n\n if self.get_deleted_project(project_id):\n return False\n\n return True", "def delete(self):\n\n headers = self._default_headers()\n\n return self._request(self.name,\n ok_status=None,\n data=None,\n headers=headers,\n method=\"DELETE\")", "def remove_project(self, project_id):\n project_file_path = '{}/{}'.format(self._storage_location, project_id)\n if os.path.exists(project_file_path):\n os.remove(project_file_path)\n else:\n raise ValueError('The project id {} does not exist!'.format(project_id))", "def test_projects_id_comments_delete(self):\n project = Comment()\n response = self.client.open('/project-tracker/projects/{id}/comments'.format(id=56),\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "async def delete_user_byid(*_):\n return web.Response(text=\"PUT project not implemented\", status=501)", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "def delete(project_id, integration_id):\n IntegrationService.delete(project_id, integration_id)\n\n return {\"status\": \"deleted\"}, 200", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def delete_record(request, slug, pk):\n # Try except to make sure the user is a member of this project\n try:\n ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n except ObjectDoesNotExist:\n # User is not a member\n return HttpResponse(\"You're trying to access a project you're not a member of or a project that does not exist.\")\n else:\n # User is a member\n pm = ProjectMember.objects.get(user=request.user, project=Project.objects.get(slug=slug))\n # 
Access control.. if not owner or editor - access denied.\n if pm.is_owner or pm.is_editor:\n # User has access\n record = get_object_or_404(models.Record, pk=pk)\n # Delete record\n models.Record.objects.filter(project=get_object_or_404(models.Project, slug=slug), pk=pk).delete()\n # Send user back to project detail, the overview of all records in the project.\n return redirect('projects:single', slug=slug)\n else:\n # Access denied...\n return HttpResponse(\"You don't have the permission to do this\")", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"delete\"), kwargs)", "def delete_bucket_from_project(projectname, bucketname):\n return jsonify(\n admin.delete_bucket_on_project(\n current_app.scoped_session(), projectname, bucketname\n )\n )", "def delete(self, *args, **kwargs) -> Any:\n pass", "def delete_project_entities(cls, project_id,\n suppress_exception=False,\n session=None):\n cls.db_repo.delete_project_entities(\n project_id, suppress_exception=suppress_exception, session=session)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'delete', api_path, *args, **kwargs)", "def test_Projects_CanBeCreatedAndDeleted_Successfully(self):\n\n name = \"Test Project \"\n # Test create a new project successfully\n project = self.api.create_project(name, \"Py\")\n self.assertTrue(project['success'])\n self.assertTrue(project['projects'][0]['projectId'] > 0)\n self.assertTrue(project['projects'][0]['name'] == name)\n\n # Delete the project\n deleteProject = self.api.delete_project(project['projects'][0]['projectId'])\n self.assertTrue(deleteProject['success'])\n\n # Make sure the project is really deleted\n projectList = self.api.list_projects()\n self.assertFalse(any(project['projects'][0]['projectId'] == projectList['projects'][i]['projectId']\n for i in range(len(projectList['projects']))))", "def delete(self, *args, **kwargs):\n return self.handle_delete_request()" ]
[ "0.8127805", "0.8062877", "0.802956", "0.7817713", "0.7729044", "0.7698603", "0.766216", "0.7631897", "0.7615524", "0.7615524", "0.7582872", "0.7567336", "0.7534077", "0.7488375", "0.73751706", "0.73750573", "0.7347409", "0.7338061", "0.73248214", "0.7316373", "0.73060185", "0.72965384", "0.726874", "0.7259552", "0.7214621", "0.71254116", "0.7117865", "0.7096892", "0.70704055", "0.70005244", "0.6996522", "0.6993252", "0.6988129", "0.6954122", "0.69425964", "0.69133276", "0.69070077", "0.6868677", "0.6782314", "0.67599183", "0.67256665", "0.6643733", "0.6642487", "0.66032606", "0.6600892", "0.6588286", "0.6587878", "0.65749705", "0.6571652", "0.6465736", "0.6437227", "0.6383195", "0.63188666", "0.6316665", "0.63126683", "0.63010097", "0.6266155", "0.6234201", "0.62318283", "0.62032133", "0.6195249", "0.61637163", "0.6159762", "0.6148217", "0.6122427", "0.6091299", "0.6069954", "0.60464305", "0.60276425", "0.60156506", "0.59977347", "0.5990414", "0.59567595", "0.59464866", "0.5943301", "0.59403336", "0.59358454", "0.59080243", "0.5872413", "0.58704764", "0.58660847", "0.5863077", "0.58542436", "0.58480847", "0.5836415", "0.5836415", "0.58215916", "0.5806642", "0.58037794", "0.58031696", "0.579138", "0.57824296", "0.57824296", "0.5764153", "0.576068", "0.57511276", "0.5711893", "0.5704863", "0.57022804", "0.5691741" ]
0.7856347
3
Decorator that returns 403 status if user isn't logged in instead of redirecting to the LOGIN_URL
def login_required_403(view):
    @wraps(view)
    def dec_view(request, *args, **kwargs):
        if not request.user.is_authenticated():
            return JsonResponse({"detail": "You have to log in"}, status=403)
        return view(request, *args, **kwargs)
    return dec_view
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login_required(f):\n @functools.wraps(f)\n def wrap(*args, **kwargs):\n if not user_session.is_auth:\n raise Forbidden()\n return f(*args, **kwargs)\n return wrap", "def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(get_next_url(request))\n return func(request, *args, **kwargs)\n return decorated", "def login_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif not users.get_current_user():\n\t\t\treturn redirect(users.create_login_url(request.url))\n\t\treturn func(*args, **kwargs)\n\treturn decorated_view", "def require_login(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n user = get_user()\n if user:\n setattr(g, 'user', user)\n return func(*args, **kwrds)\n else:\n url = url_for('auth.login', redir=request.url)\n return redirect(url)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n log = app_logger()\n log.warning(\"Unexpected error: %s\", exc_value)\n log.error(''.join(traceback.format_exception(\n exc_type, exc_value, exc_traceback\n )))\n return abort(500)\n\n return wrapper", "def not_logged_in(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'idToken' in session:\n return redirect(url_for('index'))\n else:\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'user_id' not in login_session:\n flash('User not allowed to access')\n return redirect('/login')\n return f(*args, **kwargs)\n return decorated_function", "def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n current = users.get_current_user()\n if not current:\n return redirect(users.create_login_url(request.url))\n elif current.email() == '[email protected]':\n return func(*args, **kwargs)\n else:\n return redirect(users.create_logout_url(request.url))\n return decorated_view", "def login_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None:\n app.logger.info('redirecting not logged in user')\n return redirect(url_for('index'))\n elif not g.user.initialized and f.__name__ not in ['profile_create','logout']:\n return redirect(url_for('profile_create'))\n else:\n return func(*args, **kwargs)\n return f", "def user_login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.is_authenticated:\n # Flash warning that user login is required\n flash(\"Logged in user only.\", category=\"error\")\n # Return redirect to login\n return redirect(url_for('main.login'))\n if current_user.banned:\n # Log user out so they can't access their account\n logout_user()\n # Flash warning that user has been banned\n flash(\"You have been banned, please contact an admin.\",\n category=\"error\")\n # Return redirect to index\n return redirect(url_for('main.index'))\n return f(*args, **kwargs)\n return decorated_function", "def unauthorized_handler(self):\n return flask.redirect(\"/login\")", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n return f(*args, **kwargs)\n return decorated_function", "def require_logged_in():\n def handler(f, *args, **kwargs):\n if args[0].current_user is not None:\n return f(*args, **kwargs)\n else:\n raise HTTPFound(args[0].route_url('user.login', _query={'redirect': encode_route(args[0])}))\n return decorator(handler)", "def login_required(func):\n 
@functools.wraps(func)\n def wrapper(*args, **kwargs):\n if g.user is None:\n flash('You have to log in first')\n return redirect(url_for('authentication.login', next=url_for(request.endpoint)))\n return func(*args, **kwargs)\n return wrapper", "def admin_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if users.get_current_user():\n if not users.is_current_user_admin():\n abort(401) # Unauthorized\n return func(*args, **kwargs)\n return redirect(users.create_login_url(request.url))\n return decorated_view", "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(\n url_for(\"home\", force_login=True, next=request.url)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function", "def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n elif not current_user.is_authenticated and not current_user.is_active:\n return current_app.login_manager.unauthorized()\n return func(*args, **kwargs)\n\n return decorated_view", "def login_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if 'username' not in login_session:\n return redirect('login')\n else:\n return func(*args, **kwargs)\n return wrapper", "def anonymous_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if g.user:\n flash('You have been already logged in')\n return redirect(url_for('view.index'))\n return func(*args, **kwargs)\n return wrapper", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n if session['user_id']:\n pass\n except KeyError:\n return redirect(url_for('users.login', next=request.url))\n return f(*args, **kwargs)\n return decorated_function", "def login_required(function):\n\n @wraps(function)\n def wrapper(self, *args, **kw):\n \"\"\"Redirect to main if user doesn't logged in. 
\"\"\"\n\n if not self.user:\n return self.redirect('/blog/login')\n return function(self, *args, **kw)\n return wrapper", "def login_required(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n if not session.get('logged_in', False):\n abort(401)\n return func(*args, **kwargs)\n return wrapped", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # checks is user login\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'username' in login_session:\n return f(*args, **kwargs)\n else:\n flash(\"You are not allowed to access there\")\n return redirect('/login')\n return decorated_function", "def admin_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\tif not users.is_current_user_admin():\n\t\t\t\tabort(401) # Unauthorized\n\t\t\treturn func(*args, **kwargs)\n\t\treturn redirect(users.create_login_url(request.url))\n\treturn decorated_view", "def decorated_function(*args, **kwargs):\n if not session.get('username'):\n flash('Login to continue', 'warning')\n return redirect(url_for('sign_in', next=request.url))\n return func(*args, **kwargs)", "def require_user(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return render_template('account/error.html', error='抱歉,您无权进行该操作!')\n # return redirect(url_for('site.login'))\n return func(*args, **kwargs)\n\n return decorator", "def login_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'username' in session:\n return func(*args, **kwargs)\n else:\n flash(\"You must be logged in to access that page.\", 'danger')\n return redirect(url_for('login'))\n return wrapper", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\r\n @wraps(f)\r\n def wrap(*args, **kwargs):\r\n if \"logged_in\" in session:\r\n return f(*args, **kwargs)\r\n return redirect(\"/user/login\")\r\n return wrap", "def login_required(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n \"\"\" Modified descriprition of the decorated function \"\"\"\n if not session.get('username'):\n flash('Login to continue', 'warning')\n return redirect(url_for('sign_in', next=request.url))\n return func(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'username' not in login_session:\n flash('You need to be logged in to access here', 'alert-info')\n return redirect(url_for('showLogin'))\n else:\n return f(*args, **kwargs)\n return decorated_function", "def account_login_required(fn):\n @wraps(fn)\n def _wrapper(request, *args, **kwargs):\n if request.user is None:\n return HTTPFound(request.route_url(\"login\"))\n\n return fn(request, *args, **kwargs)\n\n return _wrapper", "def login_required(func):\n @wraps(func)\n def login(*args, **kwargs):\n # Redirect to login if user not logged in, else execute func.\n if 'username' not in login_session:\n return redirect('/login')\n return 
func(*args, **kwargs)\n return login", "def api_login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return abort(401)\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if session.get(\"user_id\") is None:\r\n flash(\"You must log in to view that page.\")\r\n return redirect(url_for(\"login\"))\r\n return f(*args, **kwargs)\r\n return decorated_function", "def require_login(f):\n\n @wraps(f)\n def wrapper(*args, **kwds):\n if not api.user.is_logged_in():\n raise PicoException(\"You must be logged in\", 401)\n return f(*args, **kwds)\n\n return wrapper", "def login_required(f):\n @wraps(f)\n def login_decorator(*args, **kwargs):\n if not session.get('logged_in'):\n abort(401)\n else:\n return f(*args, **kwargs)\n return login_decorator", "def login_required(func):\n @wraps(func)\n def check_login(*args, **kwargs):\n # Redirect to login if user not logged in, else execute func.\n if 'username' not in login_session:\n return redirect('/login')\n else:\n return func(*args, **kwargs)\n return check_login", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/sign_in\")\n return f(*args, **kwargs)\n return decorated_function", "def check_login_required(views_func):\n @wraps(views_func)\n def wrapper(request, *args, **kwargs):\n if request.user.is_authenticated:\n return views_func(request, *args, **kwargs)\n else:\n return HttpResponse(status=401)\n return wrapper", "def user_required(handler):\n def check_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n # If handler has no login_url specified invoke a 403 error\n try:\n self.redirect(self.auth_config['login_url'], abort=True)\n except (AttributeError, KeyError), e:\n self.abort(403)\n else:\n return handler(self, *args, **kwargs)\n\n return check_login", "def login_required(func):\n @functools.wraps(func)\n @private_cache_headers\n def is_user_logged_in(*args, **kwargs):\n if not authentication.is_authenticated(flask.session):\n return flask.redirect('login?next=' + flask.request.path)\n\n return func(*args, **kwargs)\n return is_user_logged_in", "def login_required(f):\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if session.get(\"user_id\") is None:\r\n return redirect(\"/login\")\r\n return f(*args, **kwargs)\r\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"username\") is None:\n return redirect(\"/signin\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n 
@wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def loginrequired(f):\n def securedf(request, *args, **kwargs):\n if request.user.is_authenticated():\n return f(request, *args, **kwargs)\n else:\n return HttpResponseRedirect(ADMINURL)\n return securedf", "def login_required(view):\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"handlers.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view", "def login_required(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if \"user_id\" in session:\n return f(*args, **kwargs)\n else:\n flash(\"Please login to access this page.\")\n return redirect('/login')\n\n return wrapper", "def auth_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n return func(request)\n return wrapper", "def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/login\")\n return f(*args, **kwargs)\n return decorated_function", "def user_required(handler):\n\n def check_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n # If handler has no login_url specified invoke a 403 error\n try:\n self.redirect(self.auth_config['login_url'], abort=True)\n except (AttributeError, KeyError), e:\n self.abort(403)\n else:\n return handler(self, *args, **kwargs)\n\n return check_login", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, 
**kwargs):\n if 'user' not in session:\n if len(get_flashed_messages()) == 0:\n flash(\"Please sign in to view this page\", \"error\")\n return redirect(url_for('sign_in', next=request.url))\n else:\n flash(\"Sign Out Successful\", \"success\")\n return redirect(url_for('index'))\n return f(*args, **kwargs)\n return decorated_function", "def login_required_for_token(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(url_for(\"api_v1_login\", next=request.url))\n return f(*args, **kwargs)\n\n return decorated_function", "def unauthorized_only(view_func):\n def is_anonymous(user):\n return user.is_anonymous()\n\n return user_passes_test(is_anonymous, login_url='/', redirect_field_name=None)(view_func)", "def authenticated(decoratee):\n\n @wraps(decoratee)\n def wrapper(*args, **kwargs):\n if \"username\" in session.keys() and is_user(session[\"username\"]) and is_enabled(session[\"username\"]):\n return decoratee(*args, **kwargs)\n else:\n session[\"last_error\"] = \"You need to be logged in to view this page.\"\n return redirect(url_for(\"error\"))\n\n return wrapper", "def login_required(view):\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view", "def validate_login_status(func):\n def wrapped_f(request, *args, **kwargs):\n if request.session.get(\"Uid\") is None:\n return HttpResponseRedirect('/')\n return func(request,*args, **kwargs)\n return wrapped_f", "def requires_entrepreneur(func):\n def decorator(request, *args, **kwargs):\n if request.user.is_authenticated() and not request.user.is_entrepreneur():\n return redirect('dashboard')\n else:\n return func(request, *args, **kwargs)\n return decorator", "def check_logged_in(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n if 'username' in login_session:\n return function(*args, **kwargs)\n else:\n return redirect(url_for('category_list_handler'))\n return wrapper", "def require_admin(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return redirect(url_for('admin.login'))\n if g.user.name != 'admin':\n abort(403)\n return func(*args, **kwargs)\n\n return decorator", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def anonymous_required(func):\n\n async def wrapped(self, *args, **kwargs):\n if self.request.user is not None:\n print(\"Login please.\")\n # redirect(self.request, 'index')\n\n return await func(self, *args, **kwargs)\n\n return wrapped", "def api_login_required(func):\n\n @wraps(func)\n def decorated_view(*args, **kwargs):\n \"\"\"decorator\"\"\"\n if request.method in EXEMPT_METHODS: # pragma: no cover\n return func(*args, **kwargs)\n # 'func' is a Flask.view.MethodView so we have access 
to some special\n # params\n cls = func.view_class\n login_required = getattr(cls, \"login_required\", True)\n if (\n bui.auth != \"none\"\n and login_required\n and not bui.config.get(\"LOGIN_DISABLED\", False)\n ):\n if not current_user.is_authenticated:\n if request.headers.get(\"X-From-UI\", False):\n abort(403)\n return Response(\n \"Could not verify your access level for that URL.\\n\"\n \"You have to login with proper credentials\",\n 401,\n {\"WWW-Authenticate\": 'Basic realm=\"Login Required\"'},\n )\n return func(*args, **kwargs)\n\n return decorated_view", "def requires_company_user(func):\n def decorator(request, *args, **kwargs):\n if request.user.is_authenticated() and not request.user.is_company_user():\n return redirect('dashboard')\n else:\n return func(request, *args, **kwargs)\n return decorator", "def require_visitor(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if g.user:\n return redirect(url_for('site.home'))\n return func(*args, **kwargs)\n\n return decorator", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"email\") is None:\n return redirect(url_for(\"login\"))\n return f(*args, **kwargs)\n return decorated_function", "def require_login_or_401(function):\n def wrap(request, *args, **kwargs):\n if request.user.is_anonymous:\n return Response({\"detail\": \"Must be logged in.\"}, status=401)\n return function(request, *args, **kwargs)\n return wrap", "def require_admin(handler_method):\n def Decorate(self):\n if not users.is_current_user_admin():\n self.error(401)\n html = '<html><body><a href=\"%s\">Sign in</a></body></html>'\n self.response.out.write(html % (users.create_login_url(self.request.url)))\n return\n return handler_method(self)\n return Decorate", "def anonymous_required(func):\n\n async def wrapped(self, *args, **kwargs):\n if self.request.user is not None:\n add_message(self.request, \"Please log-out to continue.\")\n redirect(self.request, \"index\")\n return await func(self, *args, **kwargs)\n\n return wrapped", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def login_required(view_func):\n @wraps(view_func)\n def _checklogin(request, *args, **kwargs):\n if request.user.is_active:\n # The user is valid. 
Continue to the admin page.\n return view_func(request, *args, **kwargs)\n return site.login(request)\n return _checklogin", "def login_required(func):\n def wrapper(self, request, *args, **kwargs):\n if not request.user.is_authenticated():\n raise ApiLoginRequired\n return func(self, request, *args, **kwargs)\n\n wrapper.requires_login = True\n return wraps(func, wrapper)", "def login_view(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except MultipassException as e:\n return get_state().multipass.handle_auth_error(e, True)\n\n return decorator", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))" ]
[ "0.7650609", "0.7616408", "0.7589018", "0.7559412", "0.7558609", "0.7510281", "0.7494149", "0.7491331", "0.7440667", "0.74237275", "0.74189824", "0.74179274", "0.7398192", "0.73772556", "0.7373833", "0.73519063", "0.7316154", "0.7309339", "0.7299229", "0.7293337", "0.7288219", "0.7288136", "0.72664815", "0.7252768", "0.7223765", "0.72029126", "0.71969515", "0.7176953", "0.7176953", "0.71739274", "0.71707964", "0.7169246", "0.7163182", "0.7160635", "0.7157922", "0.71536475", "0.7148594", "0.71485794", "0.71484095", "0.7148189", "0.71213526", "0.71195287", "0.71183044", "0.7107039", "0.7105749", "0.7094289", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70859474", "0.70779645", "0.70745003", "0.7074323", "0.7074323", "0.7068309", "0.7065025", "0.706294", "0.7059705", "0.70589256", "0.7057396", "0.7053338", "0.7051544", "0.7040513", "0.70184165", "0.70072705", "0.7006843", "0.7003318", "0.6994858", "0.69735837", "0.69735837", "0.69735837", "0.697095", "0.6949922", "0.6948893", "0.6948788", "0.6947899", "0.69380677", "0.6933175", "0.69311786", "0.69141454", "0.69003016", "0.689259", "0.68923545", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197", "0.68908197" ]
0.79399925
0
Decide where to go, dashboard if logged in, login form if not
def start_view(request):
    if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():
        if Employee.objects.get(user__pk=request.user.pk).is_manager:
            return HttpResponseRedirect('/dashboard')
        else:
            return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)
    else:
        return HttpResponseRedirect('/login/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login(self):\n identity = request.environ.get('repoze.who.identity')\n came_from = str(request.GET.get('came_from', '')) or \\\n url('/')\n if identity:\n redirect(url(came_from))\n else:\n c.came_from = came_from\n c.login_counter = request.environ['repoze.who.logins'] + 1\n return render('/forms/login.mako')", "def ShowLogin():\n current_user = helpers.get_current_user()\n if current_user is None:\n return render_template('login.html')\n else:\n return redirect('/')", "def handleLogin(self):\n aVar = self.session.getAttribute(self.settings.authenvar)\n self.loggedin = False\n if not aVar:\n self.currenttemplate = self.settings.logintemplate \n self.logger.debug(\"Not logged in, Login-Mask activated.\")\n return\n\n self.loggedin = True\n self.logger.debug('Loged in as: \"{}\"'.format(aVar))", "def GET_login(self):\r\n\r\n # dest is the location to redirect to upon completion\r\n dest = request.get.get('dest','') or request.referer or '/'\r\n return LoginPage(dest = dest).render()", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def index(self):\n\n # try and pull the user's data\n user = get_active_user_data()\n\n if not user:\n # they are not logged in give them the login form\n return render('/login_form.html')\n\n # they are logged in, pass them to the home page\n redirect('/')", "def login_menu(self):\n print(\"\\nPlease enter your email and password\")\n email = self.validate_email()\n password = self.validate_password()\n self.authenticate_user(email, password)", "def login():", "def login():", "def login():\n login_page = Login()\n login_page.login_main_page()", "def goto_login(self):\n self.driver.find_element(*BasePageLocators.MY_ACCOUNT_DROPDOWN).click()\n self.driver.find_element(*BasePageLocators.GO_LOGIN).click()\n return LoginPage(self.driver)", "def login(self):\n\t\treturn", "def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login', params=dict(came_from=came_from, __logins=login_counter))\n return\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def loginView(request):\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n dologin(request, user)\n if isOperator(user): # login as an operator\n return redirect('/operator/map')\n elif isAdmin(user): # login as an admin\n return redirect('/admin/map')\n return HttpResponse('ok')\n else:\n # Return a 'disabled account' error message\n return HttpResponse(\"Disabled account\")\n else:\n # Return an 'invalid login' error message.\n return HttpResponse(\"Invalid login\")", "def log_in(self):\n\t\tpass", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def login(self):\n logging.debug(\"login called\")\n\n # Apply settings\n self.localisationsettings.apply_to_upcoming_session()\n self.admin_setting.apply_to_upcoming_session()\n self.macspoof_setting.apply_to_upcoming_session()\n 
self.network_setting.apply_to_upcoming_session()\n\n self.mainwindow.hide()\n self.gdmclient.do_login()", "def view_login(self):\n with self.client.get(\"/login\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Logged on: Got redirect to /home\")", "def login(self):", "def login_form():\n # if request.method == \"GET\":\n return render_template('login.html')", "def do_login(self):\n if self.app.authentication_only:\n self.app.stop()\n else:\n self.set_screen(EXPLORER)", "def post_login(self, came_from='/'):\n if not request.identity:\n login_counter = request.environ['repoze.who.logins'] + 1\n redirect('/login', came_from=came_from, __logins=login_counter)\n\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n redirect(came_from)", "def login():\n form = LoginForm()\n\n state = process_login(form)\n if state == LoginState.SHOW_LOGIN:\n return render_template('user/login.html', form=form)\n elif state == LoginState.SHOW_LOGIN_LOCKED:\n flash('User is locked, Contact Systems Administrator', 'danger')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_LOGIN_INCORRECT_PASSWORD:\n flash('Password is Incorrect', 'danger')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_LOGIN_EMAIL_NOT_EXIST:\n flash('Email does not exist', 'warning')\n return redirect(url_for('user_view.login'))\n elif state == LoginState.SHOW_DASHBOARD:\n return redirect(url_for('dashboard_view.home'))", "def i_am_in_the_login_page(browser):", "def log_in():\n form = LoginForm(request.form)\n if request.method == 'POST' and form.validate():\n if form.username.data != current_app.config['USERNAME']:\n flash('Invalid username.')\n elif form.password.data != current_app.config['PASSWORD']:\n flash('Invalid password.')\n else:\n session['logged_in'] = True\n flash('You were logged in.')\n\n return redirect(url_for('blog.show_posts'))\n\n return render_template('auth/log_in.html', form=form)", "def user_login(request):\n\n user = request.user\n if user.is_authenticated():\n status = user.get_profile().application.submitted #Getting the submission status\n if status: #If already submitted, takes to Completion Page\n return redirect('/allotter/complete/')\n else: #Otherwise to Details Submission form \n return redirect('/allotter/details/')\n\n if request.method == \"POST\":\n form = UserLoginForm(request.POST)\n if form.is_valid():\n user = form.cleaned_data\n login(request, user)\n status = user.get_profile().application.submitted #Getting the submission status \n if status:\n return redirect('/allotter/complete/') #Redirect to Completion Page\n else: \n return redirect('/allotter/details/') #Redirect to user details submission \n else:\n context = {\"form\": form}\n return render(request, 'allotter/login.html', context)\n else:\n form = UserLoginForm()\n context = {\"form\": form}\n return render(request, 'allotter/login.html', context)", "def login(self):\n\n self.__login_if_required()", "def plans_login(self, username='', password=''):\n # the provided username and password ONLY get checked\n # by the plans server if our cookie is expired.\n # hence, if we've logged in recently, this will return True even\n # if un/pw are not provided or are otherwise bad.\n login_info = {'username': username,\n 'password': password,\n 'submit': 'Login'}\n response = self._get_page('index.php', post=login_info)\n # if login is successful, we'll be 
redirected to home\n success = response.url[-9:] == '/home.php'\n if success:\n self.parser.feed(response.text) # parse out username\n self.username = self.parser.username\n return success", "def admin_login_form():\n\n title = 'Login'\n\n return render_template(\"admin_login_page.html\",\n title=title)", "def login():\n user_type = get_admin_type_in_session()\n login_form = LoginForm(request.form)\n current_app.logger.info(f'user_type{user_type}')\n if user_type != UserTypeEnum.NOT_LOGIN:\n return redirect(url_for('admin.admin'))\n\n if 'login' in request.form:\n # read form data\n username = request.form.get('username')\n password = request.form.get('password')\n remember = True if request.form.get('remember') else False\n staff_checked = db.session.query(Staff).filter(Staff.username == username).first()\n if not staff_checked or not check_password_hash(staff_checked.password, password):\n return render_template('accounts/login.html', msg='Không Có Tài Khoản hoặc mật khẩu sai, vui lòng kiểm tra',\n form=login_form)\n else:\n session['admin'] = username\n session['admin_type'] = UserTypeEnum.ADMIN_LOGIN\n session['admin_id'] = staff_checked.id\n session['remember'] = remember\n session['admin_avatar'] = staff_checked.avatar_url if staff_checked.avatar_url else ''\n return redirect(url_for('admin.admin'))\n return render_template('accounts/login.html',\n form=login_form)", "def login(self, came_from=url('/')):\n login_counter = request.environ['repoze.who.logins']\n if login_counter > 0:\n flash(_('Wrong credentials'), 'warning')\n return dict(page='login', login_counter=str(login_counter),\n came_from=came_from)", "def login_view(request):\n\n try:\n # If we were redirected from another page\n next = request.GET['next']\n # Print message\n return render_to_response('login.html', {'next': next})\n except:\n pass\n\n try:\n username = request.POST['login']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n\n if user is not None:\n if user.is_active:\n # All is cool\n login(request, user)\n return redirect('/feeds')\n else:\n # User is disabled\n return render_to_response('login.html', {'disabled': 'true'})\n else:\n # Invalid user\n return render_to_response('login.html', {'invalid': 'true'})\n except KeyError:\n # Or just display the login\n return render_to_response('login.html')", "def login_user():\n pass", "def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def do_admin_login():\n if request.form['password'] == 'admin' and request.form['username'] == 'admin':\n teams = get_team()\n if teams:\n return render_template('team-players.html', teams=teams)\n else:\n return render_template('team-players.html')\n else:\n flash('Invalid username or password. 
Please try again!')\n return render_template('login.html')", "def login(self, came_from=lurl('/')):\n login_counter = request.environ.get('repoze.who.logins', 0)\n if login_counter > 0:\n flash(_('Wrong credentials'), 'warning')\n return dict(page='login', login_counter=str(login_counter),\n came_from=came_from)", "def login(self, came_from=lurl('/')):\n login_counter = request.environ.get('repoze.who.logins', 0)\n if login_counter > 0:\n flash(_('Wrong credentials'), 'warning')\n return dict(page='login', login_counter=str(login_counter),\n came_from=came_from)", "def homepage():\n form = LoginForm()\n return render_template(\"admin/index.html\", title=\"Admin\", form=form)", "def login_user(request):\n if request.user.is_authenticated: \n return redirect('dashboard')\n else:\n if request.method == \"POST\":\n login_form = LoginForm(data=request.POST)\n print(login_form)\n try:\n if login_form.is_valid():\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n # Redirect to a success page.\n if request.user.is_authenticated:\n return redirect('index')\n except:\n import traceback\n traceback.print_exc()\n return render(request, \"login.html\", {\"login_form\": login_form})\n\n else:\n login_form = LoginForm()\n return render(request, \"login.html\", {\"login_form\": login_form})", "def do_login_login():\n print(inspect.stack()[1][3])\n print(request.form)\n query = select([User]).where(and_(User.columns.email == request.form['email'],User.columns.password==request.form['password'] ))\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if ResultSet:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n # return str(get_flashed_messages())\n return home(result)", "def do_login():\n\n isTeacher = False\n\n # check if this_user is admin or normal user\n this_user = User.query.filter_by(username=request.form['username']).first()\n \n # is this_user is not student or admin then check teacher table\n if this_user is None:\n this_user = Teacher.query.filter_by(username=request.form['username']).first()\n isTeacher = True\n\n # if this_user is still none -> invalid user\n if this_user is not None:\n if this_user.password == request.form[\"password\"]:\n session['authenticated'] = True\n session['username'] = this_user.username\n session['name'] = this_user.name\n session['isTeacher'] = isTeacher\n if session['username'] == \"admin\":\n session['wasAt'] = \"manageusers\"\n try:\n session['cpi'] = this_user.cpi\n session['grp_size'] = this_user.group_size\n except:\n pass\n else:\n flash(\"Incorrect Password, Please Try Again\") \n else:\n flash(\"Invalid Username, Please Try Again\")\n return home()", "def dispatch(self, request, *args, **kwargs):\n next_url = request.GET.get('next', 'home')\n if 'username' in request.GET:\n self.login_user()\n return redirect(next_url)\n elif settings.MEMBERS_ONLY_DOMAIN is None:\n superuser, _ = User.objects.get_or_create(\n is_superuser=True,\n defaults={\n 'username': 'member',\n },\n )\n login(self.request, superuser)\n return redirect(next_url)\n else:\n return super().dispatch(request, *args, **kwargs)", "def login():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n\n form = LoginForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user and bcrypt.check_password_hash(user.password, 
form.password.data):\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next') # get next url parameter and after login redirect to requested page\n flash(\"Login Successful\", 'success')\n\n # if there was request for specific page that needs authorization, then that argument assigned in\n # variable `next_page` keeps that and after login automatically user is redirect to that page\n return redirect(next_page) if next_page else redirect(url_for('main.home'))\n\n else:\n flash(\"Login Unsuccessful. Please check email and password\", \"danger\")\n\n return redirect(url_for('users.login'))\n\n return render_template('login.html', title='Log In', form=form)", "def do_login(request):\n distinct_id = request.session.pop('distinct_id')\n user = User.objects.get(id=distinct_id)\n login(request, user)\n return redirect_to_user_settings()", "def login():\n return redirect(build_authorize_url())", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))", "def post_login(self, came_from=lurl('/')):\n if not request.identity:\n login_counter = request.environ.get('repoze.who.logins', 0) + 1\n redirect('/login',\n params=dict(came_from=came_from, __logins=login_counter))\n userid = request.identity['repoze.who.userid']\n flash(_('Welcome back, %s!') % userid)\n\n # Do not use tg.redirect with tg.url as it will add the mountpoint\n # of the application twice.\n return HTTPFound(location=came_from)", "def login_form_valid(self, form):\n self.request.session.update({\n 'user_is_none': None,\n 'user_is_active': None\n })\n\n email = form.cleaned_data['email']\n password = form.cleaned_data['password']\n user = authenticate(email=email, password=password)\n\n if user is None:\n self.request.session['user_is_none'] = True\n return HttpResponseRedirect('/user_account/')\n elif user.active is False:\n self.request.session['user_is_active'] = False\n return HttpResponseRedirect('/user_account/')\n else:\n self.request.session.update({\n 'user_is_none': False,\n 'user_is_active': True\n })\n login(self.request, user)\n return HttpResponseRedirect('/schedule/')", "def GET_adminon(self):\r\n #check like this because c.user_is_admin is still false\r\n if not c.user.name in g.admins:\r\n return self.abort404()\r\n self.login(c.user, admin = True)\r\n\r\n dest = request.referer or '/'\r\n return self.redirect(dest)", "def custom_login(request, **kwargs):\n if request.user and request.user.is_authenticated():\n return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)\n else:\n return login(request, **kwargs)", "def handle_forbidden_for_homepage(self, request):\n\n login_url = request.link(Auth.from_request_path(request), name='login')\n\n if URL(request.url).path() == '/':\n return morepath.redirect(login_url)\n\n return handle_forbidden(self, request)", "def login(self, came_from=url('/')):\n login_counter = request.environ['repoze.who.logins']\n if login_counter > 0:\n flash(_('Wrong credentials'), 'warning')\n\n return dict(page='login', login_counter=str(login_counter),\n came_from=came_from)", "def index(request):\n if request.user.is_authenticated():\n return redirect('/matrix/')\n else:\n form = AuthenticationForm(request)\n return render(request, 'registration/login.html', {'form': form})", "def process_admin_login():\n\n entered_email = request.form.get(\"email\")\n entered_password = request.form.get(\"password\")\n admin = c.get_admin(entered_email, entered_password)\n\n if admin is False:\n flash('Invalid 
credentials. Please click on sign up to create an account!')\n return redirect('/')\n session['current_admin'] = entered_email\n ad_id = admin.admin_id\n flash('Logged in as %s' % entered_email)\n if admin.rescue_id is None:\n return redirect('/admin' + '/' + str(ad_id) + '/rescue-info')\n else:\n return redirect('/admin' + '/' + str(ad_id))", "def showLogin():\n if(checkLogin()):\n return redirect(url_for('catelog'))\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state, isLogin=checkLogin())", "def login():\n pass", "def login():\n form = LoginForm()\n if not 'username' in session:\n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username':form.username.data})\n if user and bcrypt.checkpw(request.form['password'].encode('utf-8'), user['hashed_password']):\n session['username'] = form.username.data\n current_user = session['username']\n flash(f'Welcome back, {current_user}!', 'success')\n return redirect(url_for('dashboard'))\n \n flash('Please check login details.', 'danger')\n return render_template('pages/login.html', title='Login', form=form)\n flash('You are already logged in. Did you mean to go to your dashboard instead?', 'info')\n return redirect(url_for('dashboard'))", "def _login(self, *args, **kwargs):\n pass", "def login():\n # Skip login form on forced SSO\n if request.method == \"GET\" and current_app.config[\"OAUTH_SKIP_LOGIN\"]:\n if not request.args.get('local') and oauth_type():\n return redirect(url_for(oauth_type() + '.login'))\n form = LoginForm(request.form)\n # Handle logging in\n if request.method == 'POST':\n if form.is_submitted() and form.validate():\n # Allow login with e-mail address\n if '@' in form.username.data:\n user_by_email = User.query.filter_by(email=form.username.data).first()\n if user_by_email:\n form.username.data = user_by_email.username\n # Validate user account\n login_user(form.user, remember=True)\n if not form.user.active:\n # Note: continue to profile page, where user is warned\n username = current_user.username\n return redirect(url_for('public.user', username=username))\n # Regular user greeting\n flash(\"Time to make something awesome. 
≧◡≦\", 'info')\n return redirect_dest(fallback=url_for('public.home'))\n else:\n flash_errors(form)\n logout_user()\n return render_template(\"public/login.html\",\n form=form, oauth_type=oauth_type())", "def user_login(request):\n if request.user.is_authenticated():\n try:\n return HttpResponseRedirect(request.GET['next'])\n except:\n return HttpResponseRedirect('%s/newsletter/home/' % (SUBSITE))\n \n if request.POST:\n form = LoginForm(request.POST)\n if form.is_valid():\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n try:\n return HttpResponseRedirect(request.GET['next'])\n except:\n return HttpResponseRedirect('%s/newsletter/home/' % (SUBSITE))\n else:\n return render('inactive user')\n \n form = LoginForm()\n\n args = {\n 'form': form,\n 'SUBSITE': SUBSITE,\n }\n\n return render(request, 'login.html', args, context_instance=RequestContext(request))", "def login():\n form = LoginForm()\n if request.method == \"GET\":\n return render_template('login.html', title='Sign In', form=form)\n if request.method == \"POST\":\n if 'loggedin' in session:\n return redirect(url_for('home'))\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n account = db.check_item(\"username\", username)\n if account is None:\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n if check_password_hash(str(account['password_hash']), password):\n session['loggedin'] = True\n session['username'] = account['username']\n session['admin_auth'] = account['admin_auth']\n flash('Login successfully!')\n return redirect(url_for('home'))\n flash('Invalid username or password')\n return redirect(url_for('login'))\n else:\n return redirect(url_for('login'))", "def should_be_login_url(self) -> None:\n assert \"login\" in self.browser.current_url, \"URL not login page\"", "def login_get():\n next_url = url_for('index.index')\n if g.session:\n flash(gettext('You are already logged in'), 'success')\n return redirect(next_url)\n\n return render_template('sites/auth/login.html', title=gettext('Login'))", "def login(self):\n self.button_login.click()\n return dashboard.DashboardPage(self._driver)", "def loginFunc(self):\n username = (\n self.lineEdit.text()\n ) # Get the text from the username & password lineedit\n password = self.lineEdit_2.text() #\n # Check if password and username isnt empty, if it is, popup\n if DB.verify_login(username, password) \\\n and not DB.new_customer(username):\n self.customer.budget.set_budget(DB.get_income(self.customer.email),\n DB.get_variable_expenses(self.customer.email),\n DB.get_fixed_expenses(self.customer.email))\n self.customer.budget.set_buffert(DB.get_buffert(username))\n self.displayUi = MenuScreen()\n self.hide()\n self.displayUi.show()\n elif DB.verify_login(username, password) and DB.new_customer(username):\n self.displayUi = FirstLoginScreen()\n self.hide()\n self.displayUi.show()\n else:\n self.popUp.exec_()", "def login():\n form = LoginForm()\n if form.validate_on_submit():\n\n # check whether user exists in the database and whether\n # the password entered matches the password in the database\n user = User.query.filter_by(email=form.email.data).first()\n if user is not None and user.verify_password(\n form.password.data):\n # log user in\n login_user(user)\n\n if user.is_admin:\n return redirect(url_for('view.dashboard'))\n else:\n return 
redirect(url_for('view.dashboard'))\n # when login details are incorrect\n else:\n flash('Invalid email or password.')\n # load login template\n return render_template('index.html', form=form, title='Login')", "def test_login_required_dashboard(self):\r\n response = self.client.get(reverse('dashboard'))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['Location'], 'http://testserver/accounts/login?next=/dashboard')", "def log_in():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n if PLAN.login_user(username, password):\n session['name'] = username\n flash(\"Login success ...\")\n return redirect(url_for('index'))\n flash(\"Login failed ...\")\n return render_template('login.html')\n return render_template('login.html')", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def login_required(view_func):\n @wraps(view_func)\n def _checklogin(request, *args, **kwargs):\n if request.user.is_active:\n # The user is valid. Continue to the admin page.\n return view_func(request, *args, **kwargs)\n return site.login(request)\n return _checklogin", "def dispatch(request):\n if request.user.is_admin:\n return redirect(reverse(\"admin-dashboard\"))\n else:\n return redirect(reverse(\"trainee-dashboard\"))", "def login_page():\n form = loginUser()\n\n if \"user\" in session:\n logged_user = session[\"user\"]\n return redirect(f\"users/{logged_user}\")\n\n if form.validate_on_submit():\n username=form.username.data\n password=form.password.data\n\n user = User.authenticate(username=username, password=password)\n\n if user:\n session[\"user\"] = user.username\n\n return redirect(f'/users/{username}')\n else:\n form.password.errors = ['Unable to log in']\n\n return render_template(\"login_form.html\", form=form)", "def adpanel():\n if 'user_id' not in session or session['user_id'] != 'admin':\n return redirect(url_for('login'))\n return render_template('adminpanel.html')", "def login_index():\n try:\n err_id = request.args['err_id']\n except:\n err_id = '0'\n if err_id == '1':\n error_message = 'Wrong username! Please make sure your username is right or you have registered before.'\n elif err_id == '2':\n error_message = 'Wrong password! 
Please type again with correct one!'\n else:\n error_message = ''\n if check_login():\n did_login = True\n else:\n did_login = False\n if not did_login: \n \"\"\"show login page\"\"\"\n return render_template(\"login/login.html\", message=error_message)\n else:\n \"\"\"jump to manage page\"\"\"\n return redirect(url_for('manage.manage_index'))", "def login():\n\n if current_user is not None and current_user.is_authenticated():\n return redirect(url_for(\"user.profile\"))\n\n form = LoginForm(request.form)\n if form.validate_on_submit():\n user, authenticated = User.authenticate(form.login.data,\n form.password.data)\n\n if user and authenticated:\n login_user(user, remember=form.remember_me.data)\n return redirect(request.args.get(\"next\") or\n url_for(\"forum.index\"))\n\n flash((\"Wrong username or password\"), \"danger\")\n return render_template(\"auth/login.html\", form=form)", "def test_admin_user_login_with_redirect(self):\n self.get_page(\"/\")\n self.at_page(\"/\")\n self.login(\"admin\", \"admin\", \"/admin/\")\n self.at_page(\"/admin/\")\n self.should_see(\"Django administration\")", "def login():\n authorized_redirect_URIs = ['localhost:5000', '127.0.0.1:5000']\n\n disabled = False\n if request.host not in authorized_redirect_URIs:\n flash(Markup(\"\"\"Please use\n <a href='http://localhost:5000/login'>http://localhost:5000</a>\n or\n <a href='http://127.0.0.1:5000/login'>http://127.0.0.1:5000</a>\n to access the application otherwise the OAuth will not work\"\"\"),\n category='danger')\n disabled = True\n\n if current_user.is_authenticated:\n return redirect(url_for('url.index'))\n return render_template('login.html', disabled=disabled)", "def handle_needs_login():\n flash(\"You must be logged in to access this page.\")\n return redirect(url_for('auth.login', next=request.path))", "def login():\n\n return render_template(\"login_form.html\")", "def login(self) -> redirect:\n\t\tif self.is_authorized:\n\t\t\tflash(\"You are already logged in.\")\n\t\t\treturn redirect(url_for(\"index\"))\n\t\telif request.method == \"GET\":\n\t\t\treturn render_template(\"login.jinja2\")\n\t\tsession[\"state\"] = str(uuid4())\n\t\treturn self.oauth.authorize(callback=url_for(\"authorize\", _external=True), state=session[\"state\"])", "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def index(request):\n \n user = get_user(request)\n\n # single auth system?\n if len(ENABLED_AUTH_SYSTEMS) == 1 and not user:\n return HttpResponseRedirect(reverse(AUTH_START, args=[ENABLED_AUTH_SYSTEMS[0]])+ '?return_url=' + request.GET.get('return_url', ''))\n\n #if DEFAULT_AUTH_SYSTEM and not user:\n # return HttpResponseRedirect(reverse(start, args=[DEFAULT_AUTH_SYSTEM])+ '?return_url=' + request.GET.get('return_url', ''))\n \n default_auth_system_obj = None\n if DEFAULT_AUTH_SYSTEM:\n default_auth_system_obj = AUTH_SYSTEMS[DEFAULT_AUTH_SYSTEM]\n\n #form = password.LoginForm()\n\n return render_template(request, 'index', {'return_url' : request.GET.get('return_url', '/'),\n 'enabled_auth_systems' : ENABLED_AUTH_SYSTEMS,\n 'default_auth_system': DEFAULT_AUTH_SYSTEM,\n 'default_auth_system_obj': default_auth_system_obj})", "def login():\n # Initialise login form\n form = UserLoginForm()\n # Validate and process form data\n if form.validate_on_submit():\n # Get form data\n username = form.username.data\n password = form.password.data\n # Check if username and password is valid\n valid, userID = gdb.verifyuser(username, password)\n if(valid):\n user = 
gdb.getuserbyid(userID)\n login_user(user)\n return redirect(url_for('main.dashboard'))\n else:\n flash(\"Invalid username or password.\", category=\"error\")\n return redirect(url_for('main.login'))\n # Render template\n return render_template('login.html', form=form)", "def login_form(request):\n return {'login_form': LoginForm()}", "def login_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None:\n app.logger.info('redirecting not logged in user')\n return redirect(url_for('index'))\n elif not g.user.initialized and f.__name__ not in ['profile_create','logout']:\n return redirect(url_for('profile_create'))\n else:\n return func(*args, **kwargs)\n return f", "def logged_in(request):\n return request.current_user is not None", "def local_login():\n if not current_app.config[\"USE_LOCAL_AUTH\"]:\n return redirect(url_for('auth.login'))\n login_form = BasicLoginForm()\n if request.method == \"POST\":\n email = request.form[\"email\"]\n\n user = find_user_by_email(email)\n\n if user is not None:\n login_user(user)\n session[\"user_id\"] = current_user.get_id()\n\n create_auth_event(\n auth_event_type=event_type.USER_LOGIN,\n user_guid=session[\"user_id\"],\n new_value={\n 'success': True,\n }\n )\n\n next_url = request.form.get(\"next\")\n if not is_safe_url(next_url):\n return abort(400, UNSAFE_NEXT_URL)\n\n return redirect(next_url or url_for(\"main.index\"))\n else:\n error_message = \"User {email} not found. Please contact your agency FOIL Officer to gain access to the system.\".format(\n email=email)\n flash(error_message, category=\"warning\")\n return render_template(\n \"auth/local_login_form.html\", login_form=login_form\n )\n\n elif request.method == \"GET\":\n return render_template(\n \"auth/local_login_form.html\",\n login_form=login_form,\n next_url=request.args.get(\"next\", \"\"),\n )", "def login_to_flask(self, form):\n\n flask_login.login_user(self, remember=form.remember_me.data)\n\n # handle 'next' page if we're stopping them en route\n next_page = flask.request.args.get('next')\n if not next_page or werkzeug.urls.url_parse(next_page).netloc != '':\n next_page = flask.url_for('dashboard')\n\n # create a response, inject the token into the cookie and return\n redirect = flask.redirect(next_page)\n response = app.make_response(redirect)\n response.set_cookie('kdm-manager_token', self.token)\n return response", "def do_login(self, backend, user):", "def login():\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(\n username=form.username.data.lower()\n ).first()\n\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password', 'error')\n return render_template('login.html',\n title='Sign In', form=form), 401\n\n if not user.email_confirmed:\n flash(\"Please confirm your email \\\n address to activate your Account\", 'error')\n return render_template('login.html',\n title='Sign In', form=form), 401\n\n login_user(user, remember=form.remember_me.data)\n\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('main.index')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form), 401", "def login_view():\n\n #Bypass if user is logged in\n if current_user.is_authenticated:\n return redirect(url_for(\"profiles.profile_view\",id=current_user.id))\n \n form = LoginForm()\n if form.validate_on_submit():\n user = 
User.query.filter_by(email=form.email.data).first()\n\n #Check if the password matched\n if user and user.check_password(form.password.data):\n login_user(user)\n next_page = request.args.get(\"next\")\n return redirect(next_page or url_for(f\"profiles.profile_view\", id=current_user.id))\n flash(\"Invalid email and password.\")\n return redirect(url_for(\"auth.login_view\"))\n return render_template(\"login.html\", form=form)", "def adminlogin(request):\n request.GET = request.GET.copy()\n request.GET['redirect'] = '/annaleut/admin/'\n return auth_views.login(request, template_name='admin/login.html',\n redirect_field_name='redirect')", "def default():\n\treturn render_template(\"login.html\")", "def login_bot(self):\n pass", "def login():\n form = LoginForm()\n if form.validate_on_submit():\n\n # check whether employee exists in the database and whether\n # the password entered matches the password in the database\n expert_data = Expert.query.filter_by(email=form.email.data).first()\n if expert_data is not None and expert_data.verify_password(\n form.password.data):\n # log employee in\n login_user(expert_data)\n\n # redirect to the appropriate dashboard page\n if expert_data.is_admin:\n return redirect(url_for('home.admin_dashboard'))\n else:\n return redirect(url_for('home.dashboard'))\n\n # when login details are incorrect\n else:\n flash('Invalid email or password.')\n\n # load login template\n return render_template('auth/login.html', form=form, title='Login')", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return self.login()", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def go_to_dashboard():\n\treturn render_template(\"/dashboard.html\")", "def login():\n return render_template('login.html', next=flask.request.args.get(\"next\",\"/sessions\"))", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def login():\n return render_template('auth/login.html')" ]
[ "0.7260029", "0.7014417", "0.69468075", "0.6839301", "0.6821525", "0.67518455", "0.67216855", "0.6685984", "0.6685984", "0.65857905", "0.6580526", "0.657079", "0.65570045", "0.6549986", "0.65449136", "0.65154564", "0.6505277", "0.65018094", "0.64914584", "0.6470852", "0.64603776", "0.6457846", "0.6452123", "0.6448398", "0.6439297", "0.64336884", "0.6432248", "0.6422464", "0.64176655", "0.64147186", "0.63983583", "0.639196", "0.6375093", "0.6374986", "0.6370954", "0.63672113", "0.63672113", "0.6346925", "0.6346892", "0.63462794", "0.6340884", "0.6333538", "0.63326913", "0.6330512", "0.63191825", "0.630914", "0.63073134", "0.6305924", "0.63057685", "0.6304756", "0.6303084", "0.63015276", "0.6293615", "0.62904006", "0.6271046", "0.6268117", "0.6259583", "0.6251267", "0.6241344", "0.62325644", "0.62321794", "0.623022", "0.62277997", "0.6224387", "0.62237513", "0.6213044", "0.6204433", "0.6202735", "0.61969715", "0.61948115", "0.61906725", "0.6182023", "0.6178113", "0.6176435", "0.61592746", "0.6153192", "0.6151767", "0.6147091", "0.614629", "0.6140723", "0.6138338", "0.6137261", "0.61325943", "0.6131931", "0.61318374", "0.61206126", "0.6119482", "0.61191076", "0.6116568", "0.6116004", "0.6112763", "0.6110194", "0.61001235", "0.6088099", "0.6087309", "0.6082339", "0.60803473", "0.607683", "0.60764694", "0.607426", "0.606923" ]
0.0
-1
Login with an accesscode
def accesscode(request, code):
    employee = Employee.objects.get(access_code=code)
    user = employee.user
    user.backend = 'django.contrib.auth.backends.ModelBackend'
    login(request, user)
    return HttpResponseRedirect('/')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login():", "def login():", "def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res", "def login():\n url = AUTH_URL + '&state=' + str(uuid1())\n try:\n import webbrowser\n webbrowser.open(url)\n except:\n pass\n \n print('Go to the following link in your browser:\\n\\n\\t{}\\n'.format(url))\n\n auth_code = input('Enter verification code: ')\n print('\\nObtaining access token...')\n Spotify.refresh(auth_code)\n print('Credentials saved to {}'.format(CREDS_PATH))\n return", "def login(self):", "def post_login(self, username, password, code):\n values = {\"ctl00$ContentPlaceHolder1$MemberName\" : username,\n \"ctl00$ContentPlaceHolder1$MemberPass\" : password,\n \"ctl00$ContentPlaceHolder1$CheckCode\" : code,\n \"__EVENTTARGET\" : \"\",\n \"__EVENTARGUMENT\" : \"\",\n \"__VIEWSTATE\" : \"/wEPDwUJNjIyMTU5NjYyZGQ=\",\n \"__EVENTVALIDATION\" : \"/wEWBQLwhorcCAKxtMbHCgKNvavEBwKCi/rDCQKM5+qlBA==\",\n \"ctl00$ContentPlaceHolder1$RegBtn\": \"登 录\",\n }\n result = self.data_post(values, self.login_url, self.login_send_header)\n return result", "def login():\n auth_state = str(uuid.uuid4())\n SESSION.auth_state = auth_state\n\n # For this sample, the user selects an account to authenticate. Change\n # this value to 'none' for \"silent SSO\" behavior, and if the user is\n # already authenticated they won't need to re-authenticate.\n prompt_behavior = 'select_account'\n\n params = urllib.parse.urlencode({'response_type': 'code',\n 'client_id': config.CLIENT_ID,\n 'redirect_uri': config.REDIRECT_URI,\n 'state': auth_state,\n 'resource': config.RESOURCE,\n 'prompt': prompt_behavior})\n\n return bottle.redirect(config.AUTHORITY_URL + '/oauth2/authorize?' 
+ params)", "def login(self):\n r = self._login_token()", "def login(self):\n login_form = {\"kid\": \"\",\n \"uni\": self.server,\n \"login\": self.username,\n \"pass\": self.password}\n url = \"https://%s.ogame.gameforge.com/main/login\" % self.country_code\n result = self.session.post(url, data=login_form)", "def login():\n pass", "def do_login(self, backend, user):", "def login(self):\r\n user_account = db.find_one({\"cpr_number\": request.form.get(\"CPR\")})\r\n if user_account is not None:\r\n if self.verify_password(user_account[\"password\"], request.form.get(\"password\")):\r\n return self.start_session(user_account)\r\n return jsonify({\"error\": \"Invalid login credentials\"}), 401", "def login():\n if app.testing:\n callback_url = url_for('user.authorize', _external=True)\n else:\n callback_url = 'https://codegolf.uqcs.org.au/user/authorize'\n return git_auth.authorize(callback=callback_url)", "def _login(self, login):\n self._tokens.clear()\n name, password = login\n\n params = {\"action\": \"query\", \"meta\": \"tokens\", \"type\": \"login\"}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n try:\n token = result[\"query\"][\"tokens\"][\"logintoken\"]\n except KeyError:\n raise exceptions.LoginError(\"Couldn't get login token\")\n\n params = {\"action\": \"login\", \"lgname\": name, \"lgpassword\": password,\n \"lgtoken\": token}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n\n res = result[\"login\"][\"result\"]\n if res == \"Success\":\n self._tokens.clear()\n self._save_cookiejar()\n return\n if res == \"Illegal\":\n e = \"The provided username is illegal.\"\n elif res == \"NotExists\":\n e = \"The provided username does not exist.\"\n elif res == \"EmptyPass\":\n e = \"No password was given.\"\n elif res == \"WrongPass\" or res == \"WrongPluginPass\":\n e = \"The given password is incorrect.\"\n else:\n e = \"Couldn't login; server says '{0}'.\".format(res)\n raise exceptions.LoginError(e)", "def mfa_login(self, mfacode):\n\n try:\n\n response = self.post(\"/authentication/login\",\n {\"user\": self.user, \"password\": self.password, \"token\": int(mfacode)})\n if response.status_code == 200:\n print(\"{0}: Orchestrator MFA login success\".format(self.url))\n # get and set X-XSRF-TOKEN\n for cookie in response.cookies:\n if cookie.name == \"orchCsrfToken\":\n self.headers[\"X-XSRF-TOKEN\"] = cookie.value\n return True\n else:\n print(\"{0}: Orchestrator MFA login failed: {1}\".format(self.url, response.text))\n return False\n except:\n print(\"{0}: Exception - unable to connect to Orchestrator\".format(self.url))\n return False", "def make_login(your_username, your_password):\r\n instagram.with_credentials(your_username, your_password)\r\n instagram.login()", "def login(email, password):\n rino.login.login(email, password)", "def login(email, password):\n rino.login.login(email, password)", "def login():\n login_hex = request.json.get(\"authentication\")\n if not login_hex:\n return jsonify({\"code\": \"1\", \"type\": \"user\"})\n\n qr_code_password = app.config[\"QRCODE_PASSWORD\"]\n\n if login_hex != qr_code_password:\n return jsonify({\"code\": \"3\"})\n \n jwt_token = generate_token({\"id\": generate_id()})\n\n return jsonify({\"code\": \"0\", \"token\": jwt_token})", "def login(self, name, pin):\n self.account = self.bank.get(name, pin)\n if self.account:\n return \"success\"\n else:\n return \"faliure\"", "def login(self):\n res = self.sess.get(self._login_url)\n execution = re.search('name=\"execution\" 
value=\"(.*?)\"', res.text).group(1)\n res = self.sess.get(url=self._pub_key_url).json()\n n, e = res['modulus'], res['exponent']\n encrypt_password = rsa_encrypt(self.password, e, n)\n data = {\n 'username': self.username,\n 'password': encrypt_password,\n 'execution': execution,\n '_eventId': 'submit'\n }\n res = self.sess.post(url=self._login_url, data=data)\n\n # check if login successfully\n if '统一身份认证' in res.content.decode():\n self.status = \"FAILED_LOGIN\"\n raise LoginError('Login failed. Please check your ZJU username and password.')\n logger.info(\"%s Successfully logined.\" % self)\n self.status = \"LOGINED\"\n return self.sess", "def login(self, account, password):\n url = 'https://ceq.nkust.edu.tw/Login'\n\n data = {\n '__RequestVerificationToken': self.csrf_key,\n 'UserAccount': account,\n 'Password': password,\n }\n res = self.main_session.post(url=url, data=data, allow_redirects=False)\n if res.status_code == 302:\n soup = BeautifulSoup(res.text, 'html.parser')\n status = soup.find('a')['href']\n if status == '/StuFillIn':\n return True\n return False", "def _login(self, *args, **kwargs):\n pass", "def login(self):\n \n self.br.open(\"http://kanji.koohii.com/login\")\n self.br.form = list(self.br.forms())[0]\n self.br[\"username\"] = USER\n self.br[\"password\"] = PASSWORD\n my_response = self.br.submit()\n print \"Login successful\"", "def do_login(user, password):\n return um.do_login(user, password)", "def login(self, response):\n\t\treturn FormRequest.from_response(response,\n\t\t\t formdata={'username': 'scanner1', 'password': 'scanner1'},\n\t\t\t callback=self.check_login_response)", "def login(self) -> int:\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"format\": \"json\",\n },\n )\n token = json.loads(r.text)[\"login\"][\"token\"]\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"lgtoken\": token,\n \"format\": \"json\",\n },\n )\n if json.loads(r.text)[\"login\"][\"result\"] != \"Success\":\n return -1\n return 0", "def login(self):\n request = self.REQUEST\n response = request['RESPONSE']\n\n login = request.get('__ac_name', '')\n password = request.get('__ac_password', '')\n\n pas_instance = self._getPAS()\n\n if pas_instance is not None:\n pas_instance.updateCredentials(request, response, login, password)", "def doLogin(self):\n\t\tlogin_data = urllib.urlencode({\n\t\t\t'operatorName' : self.username,\n\t\t\t'password' : self.password,\n\t\t\t'submit' : 'Iniciar+sesi%C3%B3n',\n\t\t})\n\n\t\tresponse = self.opener.open(\"http://172.16.0.2/tdserver/login_deal.jsp\", login_data)\t\t### deberia devolver verdadero o falso segun se logueo o no", "def steam_login(username, password, two_factor_code):\n steam_client = SteamClient() # Make steam client object\n steam_client.login(username, password, two_factor_code=two_factor_code) # Log in\n if not steam_client.logged_on: # Login failed\n raise SteamLoginException('Login failed.')\n return steam_client", "def login(self):\n self.open(base_url + '/login')\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n self.click('input[type=\"submit\"]')", "def login(self, username=None, password=None):\n if not username:\n username = self.email\n if not password:\n password = self.password\n var = {\n 'client': self.client,\n 'accountType': 'HOSTED_OR_GOOGLE',\n 'service': 'reader',\n 'Email': username,\n 
'Passwd': password\n }\n # do login\n self.auth_code = \"\"\n resp1 = self.make_request(url_login, var, use_get=False)\n self.auth_code = resp1['Auth']\n self._logger.info(u\"Logged in as {}\".format(username))", "def login(self):\n\n # Fetch and parse hidden inputs from login page\n # Use specific CA bundle to fix SSL verify problems if set as env.\n verify = True\n\n override_ca_bundle = os.getenv('OVERRIDE_CA_BUNDLE')\n if override_ca_bundle:\n verify = override_ca_bundle\n\n req = self.session.get(self.BASE_URL + '/im/login/privat',\n verify=verify)\n\n # Get the login form\n soup = BeautifulSoup(req.content, 'html.parser')\n login_form = soup.select('#pPin_form')\n\n # Post login to current URL\n login_post_url = req.url\n\n # Build POST data with login settings and hidden inputs\n data = self._hidden_inputs_as_dict(login_form)\n data['pPin_inp'] = self.personal_identity_number\n data['pPinKod_inp'] = self.pin_code\n\n # Login request\n req = self.session.post(login_post_url, data=data)\n self.last_req_body = req.content\n\n self._parse_tokens(req.text)\n\n return True", "def login(self, request):\n request.session['state'] = state = uuid.uuid4().hex\n auth_url = flat_url(\n PROVIDER_AUTH_URL,\n client_id=self.consumer_key,\n response_type='code',\n state=state\n )\n return HTTPFound(location=auth_url)", "def login_leetcode(driver):\n login_url = \"https://leetcode.com/accounts/login/\"\n name_field_id = \"id_login\"\n password_field_id = \"id_password\"\n login_button_selector = 'button.btn-primary'\n driver.get(login_url)\n time.sleep(2)\n username_ele = driver.find_element_by_id(name_field_id)\n password_ele = driver.find_element_by_id(password_field_id)\n username_ele.send_keys(username)\n password_ele.send_keys(password)\n driver.find_element_by_css_selector(login_button_selector).click()", "def login(self):\n # create auth payload\n payload = '{{\"grant_type\": \"password\", \"username\": \"{}\", \"password\": \"{}\"}}'.format(\n self.username, self.password)\n auth_headers = {**FTDClient.headers}\n r = requests.post(\"https://{}:{}/api/fdm/{}/fdm/token\".format(self.server_address, self.server_port, self.version),\n data=payload, verify=False, headers=auth_headers)\n if r.status_code == 400:\n raise Exception(\"Error logging in: {}\".format(r.content))\n try:\n # This token will act as the\n self.access_token = r.json()['access_token']\n # cache the original token in case we do a custom login\n self.original_access_token = self.access_token\n except:\n logging.error(\n f'Unable to log into server: https://{self.server_address}:{self.server_port}')\n raise", "def login(self) -> None:\n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.url, self.login_secret = sObj.getAuthorizeURL()\n print(self.url)\n self.oauth_token = input('token: ')\n self.oauth_verifier = input('verifier: ')", "async def login_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = await crud.user.authenticate(\n username=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=\"Incorrect credentials\")\n elif not user.is_active:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Inactive user\")\n elif not user.is_email_verified:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Please verify your account via email\")\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": create_access_token(\n data={\"user_id\": 
user.id}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def Login(self, username, password, onSuccess, onFailure):\n pass", "def login_access_token(\n db: Session = Depends(get_db),\n form_data: OAuth2PasswordRequestForm = Depends()\n) -> Any:\n user = crud.user.authenticate(\n db, email=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Incorrect email or password\")\n elif not crud.user.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(\n minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": security.create_access_token(\n user.id, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def log_in(codecool):\n\n login = school_view.get_login()\n password = school_view.get_password()\n\n password = utilities.hash_password(password)\n\n users = codecool.managers_list + codecool.administrators_list + codecool.mentors_list + codecool.students_list\n for user in users:\n if user.login == login and user.password == password:\n return user", "def do_login(self, password):\n # Creating JSON string with authentication credentails.\n in_data = ('{{ \"username\":\"{username}\",'\n '\"password\":\"{password}\" }}'\n ).format(\n username=self.pub_user,\n password=password\n )\n\n url = self.base_url + \"/oasis/login\"\n response = self.do_request(url, in_data)\n json_response = json.loads(response.content)\n\n if json_response[\"success\"] == False:\n print(\"Invalid user id or password\")\n else:\n self.cookies = dict(sessionid=response.cookies['sessionid'])\n print(\"You are logged into Mid-tier\")\n\n logger.info( 'Log in response ' + str(response.content))", "def authentication_callback(request):\n code = request.GET.get('code')\n user = authenticate(token=code, request=request)\n if user:\n auth_login(request, user)\n set_session_from_user(request, user)\n region = request.user.endpoint\n region_name = dict(Login.get_region_choices()).get(region)\n request.session['region_endpoint'] = region\n request.session['region_name'] = region_name\n url = getattr(settings, \"LOGIN_REDIRECT_URL\", \"/\")\n resp = HttpResponseRedirect(url)\n\n return resp", "def login():\n guid = uuid.uuid4() # guid used to only accept initiated logins\n session['state'] = guid\n return msgraphapi.authorize(callback=url_for('authorized', _external=True), state=guid)", "def code_login(ui, repo, **opts):\n\tMySend(None)", "def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()", "def login(self):\n\t\treturn", "def login(self):\n url = 'https://ngb.to/login.php?do=login'\n\n params = {'do': 'login'}\n payload = {'vb_login_username': self.username,\n 'vb_login_password': self.password,\n 'url': \"index.php\",\n 'do': \"login\",\n 'vb_login_md5password': \"\",\n 'vb_login_md5password_utf': \"\",\n 's': \"\",\n 'securitytoken': \"guest\",\n 
'cookieuser': \"1\"}\n\n self.session.post(url, data=payload, params=params)", "def einloggen(self):\n \n self.c.login(self.username.text(), self.password.text(), \"1\")", "def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def do_login_login():\n print(inspect.stack()[1][3])\n print(request.form)\n query = select([User]).where(and_(User.columns.email == request.form['email'],User.columns.password==request.form['password'] ))\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if ResultSet:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n # return str(get_flashed_messages())\n return home(result)", "def login_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = auth_handler.authenticate_user(\n username=form_data.username, password=form_data.password\n )\n if user is None:\n raise HTTPException(\n detail=\"Incorrect username and/or password\", status_code=400\n )\n\n return APIResponse(\n msg=TokenResponse(\n access_token=auth_handler.encode_token(user.id), token_type=\"bearer\"\n )\n )", "def login():\n # if we are already logged in, go back to were we came from\n if auth.is_authenticated():\n return flask.redirect(oid.get_next_url())\n return oid.try_login(\"https://www.google.com/accounts/o8/id\",\n ask_for=['email', 'fullname', 'nickname'])", "def login():\n data = request.get_json()\n if 'username' in data and 'password' in data:\n username = data['username']\n password = data['password']\n access_token = authenticate(username, password)\n if access_token is not None:\n print('access token: ' + access_token)\n return jsonify({'access_token': access_token})\n else:\n abort(403)\n else:\n abort(400)", "def authenticate_with_github(username=None, password=None, code=None):\n if username is not None and password is not None:\n print(' (auth given as {}:{})'.format(username, '*'*len(password)))\n\n def _2fa_func():\n return code\n\n if code:\n return login(username, password, two_factor_callback=_2fa_func)\n else:\n return GitHub(username, password)", "def login(self):\n return self.client.login(username='Georgie', password='12345678')", "async def login(form_data: OAuth2PasswordRequestForm = Depends()):\n db = get_database()\n\n user = await crud.user.authenticate(\n db, username=form_data.username, password=form_data.password\n )\n\n if not user:\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST, detail=\"Incorrect email or password\"\n )\n elif not crud.user.is_active(user):\n raise HTTPException(\n status_code=HTTP_400_BAD_REQUEST, detail=HTTP_400_BAD_REQUEST_INACTIVE_USER\n )\n\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n\n return {\n \"access_token\": create_access_token(\n data={\"username\": user.username}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def login():\n guid = uuid.uuid4() # guid used to only accept initiated logins\n session['state'] = guid\n return msgraphapi.authorize(callback=url_for('authorized', _external=True), state=guid)", "def test_auth_code_positive(self, api):\n self.builder.add_user(api.get_user())\n resp = api.login_user(api.get_user().username, api.get_user().password)\n self.builder.del_user(api.get_user())\n assert resp.status_code == 200", "def login_user():\n pass", "def loginAsManager(self):\n 
self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()", "async def login(form_data: OAuth2PasswordRequestForm = Depends()):\n user = get_user_info(form_data.username)\n if user == None:\n raise HTTPException(status_code=404, detail=\"Incorrect username or password\")\n hashed_password = simple_hash(form_data.username, form_data.password)\n if not hashed_password == user.password:\n raise HTTPException(status_code=400, detail=\"Incorrect username or password\")\n\n return {\"access_token\": user.name, \"token_type\": \"bearer\"}", "def by_code(request):\n\n requestData = {}\n serializer_context = {\n 'request': request,\n }\n requestData['access_code'] = request.data['access_code']\n requestData['prefix'] = request.data['prefix']\n # Notice here that we do not call `serializer.save()` like we did for\n # the registration endpoint. This is because we don't have\n # anything to save. Instead, the `validate` method on our serializer\n # handles everything we need.\n\n serializer = LoginByCodeSerializer(data=requestData,context=serializer_context)\n serializer.is_valid(raise_exception=True) # Retorna error de tipo 400\n # Responde con los datos que ya incluye el token de autenticación que fue agregado x el serializador\n return Response(serializer.validated_data, status=status.HTTP_200_OK)", "def login():\n token = generate_token()\n session['token'] = token\n return redirect(esisecurity.get_auth_uri(\n scopes=['esi-wallet.read_character_wallet.v1 esi-characters.read_contacts.v1 esi-characters.write_contacts.v1'],\n state=token,\n ))", "def login(userID, robotID, password): #@NoSelf", "def login(login_info):\n url = 'http://www.jintiankansha.me/account/signin'\n client = requests.session()\n # Retrieve the CSRF token first\n client.get(url) # sets the cookie\n client.cookies\n xsrf = client.cookies.items()[1]\n login_info[xsrf[0]] = xsrf[1]\n r = client.post(url, data=login_info)\n \n if r.status_code == 200:\n return client\n else:\n return None", "async def login_for_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = authenticate_user(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": user.username}, expires_delta=access_token_expires\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}", "def login():\n\n username = str(request.parsed_json['username'])\n password = str(request.parsed_json['password'])\n\n if not auth.check_password(username, password):\n return create_error(401, \"login failed\")\n\n return auth.create_auth_token(username, password), 200", "def login():\n domain = parser[\"learningmachine\"][\"domain\"]\n secrets_file = \"{}/{}\".format(dir_path, \"client_secret.json\")\n scope = \"https://www.googleapis.com/auth/userinfo.email\"\n redirect_uri = \"http://{}/login\".format(domain)\n login_handler = LoginHandler(secrets_file, scope, redirect_uri)\n\n if \"code\" in request.args:\n login_handler.setup_user_info(request.args[\"code\"])\n session[\"email\"] = login_handler.email\n session[\"display_name\"] = login_handler.display_name\n\n if not 
fm.user_exists(login_handler.email):\n msg = \"Adding user: {} with ID of {} to the database.\"\\\n .format(login_handler.email, login_handler.display_name)\n fm.add_user(login_handler.email, login_handler.display_name)\n\n msg = \"Sending user: {} to main page\".format(login_handler.email)\n app.logger.info(msg)\n return redirect(\"/static/main.html\")\n\n else:\n msg = \"No login code yet. Letting Google handle the login process at: {}\"\\\n .format(login_handler.auth_url)\n app.logger.info(msg)\n return redirect(login_handler.auth_url)", "def login():\n # Force https redirects when behind a proxy (required on Azure only)\n if 'APPINSIGHTS_INSTRUMENTATIONKEY' in os.environ: # set on Azure\n redirect_uri = url_for('authorize', _external=True, _scheme='https')\n else:\n redirect_uri = url_for('authorize', _external=True)\n\n # As this sample does not have a user session management with loging out, this\n # endpoint always starts a new authorization code flow, without detecting if the user\n # might be already logged in.\n\n return oauth.tapkey.authorize_redirect(redirect_uri)", "def attempt_login(self, expected_code, **kwargs):\r\n url = reverse('openid-provider-login')\r\n post_args = {\r\n \"openid.mode\": \"checkid_setup\",\r\n \"openid.return_to\": \"http://testserver/openid/complete/?janrain_nonce=2013-01-23T06%3A20%3A17ZaN7j6H\",\r\n \"openid.assoc_handle\": \"{HMAC-SHA1}{50ff8120}{rh87+Q==}\",\r\n \"openid.claimed_id\": \"http://specs.openid.net/auth/2.0/identifier_select\",\r\n \"openid.ns\": \"http://specs.openid.net/auth/2.0\",\r\n \"openid.realm\": \"http://testserver/\",\r\n \"openid.identity\": \"http://specs.openid.net/auth/2.0/identifier_select\",\r\n \"openid.ns.ax\": \"http://openid.net/srv/ax/1.0\",\r\n \"openid.ax.mode\": \"fetch_request\",\r\n \"openid.ax.required\": \"email,fullname,old_email,firstname,old_nickname,lastname,old_fullname,nickname\",\r\n \"openid.ax.type.fullname\": \"http://axschema.org/namePerson\",\r\n \"openid.ax.type.lastname\": \"http://axschema.org/namePerson/last\",\r\n \"openid.ax.type.firstname\": \"http://axschema.org/namePerson/first\",\r\n \"openid.ax.type.nickname\": \"http://axschema.org/namePerson/friendly\",\r\n \"openid.ax.type.email\": \"http://axschema.org/contact/email\",\r\n \"openid.ax.type.old_email\": \"http://schema.openid.net/contact/email\",\r\n \"openid.ax.type.old_nickname\": \"http://schema.openid.net/namePerson/friendly\",\r\n \"openid.ax.type.old_fullname\": \"http://schema.openid.net/namePerson\",\r\n }\r\n # override the default args with any given arguments\r\n for key in kwargs:\r\n post_args[\"openid.\" + key] = kwargs[key]\r\n\r\n resp = self.client.post(url, post_args)\r\n code = expected_code\r\n self.assertEqual(resp.status_code, code,\r\n \"got code {0} for url '{1}'. 
Expected code {2}\"\r\n .format(resp.status_code, url, code))", "def login():\n data = request.get_json()\n email = data.get('email')\n password = data.get('pwrd')\n user = SQLModel.get_by_attrs(('email', 'pwrd'), 'users', 'email', email)\n try:\n user_pw = user[0][1]\n user_nick = user[0][0]\n if password == user_pw:\n stuff = SQLModel.get_by_attrs(('login', 'pwrdHash', 'type', 'name'), 'users', 'login', login)\n return jsonify(stuff)\n else:\n return 'fail'\n except:\n return 'fail'", "def login(self):\n # the login url is just api, not api2\n url = 'https://simple-note.appspot.com/api/login'\n query = {'email': self.email, 'password': self.password}\n data = base64.b64encode(urllib.urlencode(query))\n try:\n fh = urllib2.urlopen(url, data)\n self.authtok = fh.read()\n except urllib2.HTTPError, e:\n # Received a non 2xx status code\n raise SimplenoteError('http error: {}'.format(e.code))\n except urllib2.URLError, e:\n # Non http error, like network issue\n raise SimplenoteError('url error: {}'.format(e.reason))\n fh.close()\n return True", "def Login(self):\n self.Send(self.EncryptString('login\\n'))\n\n # 'Please provide your membership number to authenticate:'\n print self.DecryptString(self.Recv(4096))\n\n # Flag 1.\n self.Send(self.EncryptString(self.flag_1))\n\n # 'Ah, I see, sir has a basic account. In that case, your limited cloud\n # execution access has been provisioned.'\n print self.DecryptString(self.Recv(4096))\n\n # 'Will there by anything else today, sir?'\n print self.DecryptString(self.Recv(4096))", "def perform_login(self, user_name, user_pass):\n if self.api_type == 'real':\n self.input_user_name(user_name)\n self.input_user_pass(user_pass)\n self.click_login_button()", "def login(self, username: str, password: str):\n\n self.driver.get(\"https://myrecsports.usc.edu/booking\")\n self.driver.find_element_by_id(\"loginLink\").click()\n self.driver.find_element_by_id(\"frmExternalLogin\")\n self.driver.execute_script(\"submitExternalLoginForm('Shibboleth')\")\n self.driver.find_element_by_id(\"username\").send_keys(\"noahbkim\")\n self.driver.find_element_by_id(\"password\").send_keys(\"she once was a true love of mine\")\n self.driver.find_element_by_name(\"_eventId_proceed\").click()\n self.driver.find_element_by_id(\"logoutForm\")", "def login():\n google = oauth.create_client('google') # create the google oauth client\n redirect_uri = url_for('authorize', _external=True)\n return google.authorize_redirect(redirect_uri)", "def login():\n authorization_url, state = facebook.authorization_url(authorization_base_url)\n print 'Please authorize', authorization_url\n\n return redirect(authorization_url, code=302)", "def login(self):\n\t\tbot = self.bot\n\t\tbot.get(URL)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"nav__button-secondary\").click()\n\t\ttime.sleep(2)\n\t\temail = bot.find_element_by_id(\"username\")\n\t\temail.send_keys(self.username)\n\t\ttime.sleep(2)\n\t\tpassword = bot.find_element_by_id(\"password\")\n\t\tpassword.send_keys(self.password)\n\t\ttime.sleep(2)\n\t\tsign_in = bot.find_element_by_class_name(\"btn__primary--large.from__button--floating\").click()", "def _login_token(self):\n data = {\n 'cmd': 'login',\n 'login': self.username,\n 'password': self.password,\n }\n \n token = self.helper._post_request(\n self.basename,\n self.basic_auth,\n data, \n self.headers)\n\n if token.status_code == 200:\n xml_response = BeautifulSoup(token.content, 'lxml')\n self.token = xml_response.find('token').get_text()\n self.cookies = 
token.cookies.get_dict()\n else:\n raise Exception('[FAIL] Could not login to OpenVAS')", "def login_action(login_page, request, driver):\n login_page.login(request.config.getoption(\"--username\"), request.config.getoption(\"--password\"))", "def submit_log_in(self, user_name, password, passcode):\r\n username_txt = user_name.get()\r\n password_txt = password.get()\r\n passcode_txt = passcode.get()\r\n self.my_socket.send(dumps(\"log in\"))\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M:%S\").split(\":\")\r\n time = current_time[0] + current_time[1]\r\n str_log_in = username_txt + \";\" + password_txt + \";\" + passcode_txt + \";\" + time\r\n self.my_socket.send(self.rsa_object.encrypt(str_log_in.encode(), self.server_key))\r\n\r\n response = self.rsa_object.decrypt(self.my_socket.recv(1024)).decode()\r\n if response == \"access granted\":\r\n self.username = username_txt\r\n self.choose_path()\r\n else:\r\n if self.log_in_tries == 2:\r\n self.log_in_tries = 0\r\n self.after_3_wrong_attempts(\"log in\")\r\n else:\r\n self.log_in_tries += 1\r\n lbl_response = Label(self.root, text=response, font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_response.pack(pady=5, padx=10)\r\n lbl_response.after(1000, lbl_response.destroy)\r\n user_name.delete(0, END)\r\n password.delete(0, END)\r\n passcode.delete(0, END)", "def do_login(opener, login_info, connection_name, username, password):\n login_payload = {\n \"client_id\": CLIENT_ID,\n \"connection\": connection_name,\n \"password\": password,\n \"popup_options\": \"{}\",\n \"protocol\": \"samlp\",\n \"redirect_uri\": \"https://signin.aws.amazon.com/saml\",\n \"response_type\": \"code\",\n \"scope\": \"openid profile email\",\n \"sso\": True,\n \"state\": login_info[\"state\"],\n \"tenant\": TENANT,\n \"username\": username,\n \"_csrf\": login_info[\"_csrf\"],\n \"_intstate\": \"deprecated\"\n }\n login_payload_json = json.dumps(login_payload).encode()\n # print(login_payload)\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Auth0-Client\": AUTH0_CLIENT_HEADER\n }\n request = urllib.request.Request(\n \"https://%s.auth0.com/usernamepassword/login\"%TENANT,\n data=login_payload_json,\n method=\"POST\",\n headers=headers)\n try:\n response = opener.open(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"Login error: %s\"%error) from None\n # print(result)\n\n # if success we will get a form in html, post it\n parser = FormParser()\n parser.feed(result)\n callback_params = urllib.parse.urlencode(parser.fields).encode()\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT\n }\n request = urllib.request.Request(\n parser.action,\n data=callback_params,\n method=parser.method.upper(), # post => POST\n headers=headers)\n try:\n response = opener.open(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"Login callback error: %s\"%error) from None\n # print(result)\n\n mfa_info = {\n \"mfaServerUrl\": re.search(\"mfaServerUrl:\\s*?\\\"(.+?)\\\"\", result).group(1),\n \"requestToken\": re.search(\"requestToken:\\s*?\\\"(.+?)\\\"\", result).group(1),\n \"postActionURL\": re.search(\"postActionURL:\\s*?\\\"(.+?)\\\"\", result).group(1),\n \"globalTrackingId\": re.search(\"globalTrackingId:\\s*?\\\"(.+?)\\\"\", result).group(1),\n }\n # print(mfa_info)\n\n 
return mfa_info", "def login(self):\r\n\r\n # Open browser with the login URL\r\n self.browser.open(self.config[\"base_url\"] + \"login\")\r\n\r\n # Select the login form\r\n self.browser.select_form('form[action=\"/login/\"]')\r\n\r\n # Fill the login form.\r\n self.browser[\"email\"] = self.config[\"email\"]\r\n self.browser[\"password\"] = self.config[\"password\"]\r\n\r\n # Submit form\r\n self.browser.submit_selected()", "def login(self):\n self.client.login(username=self.user.username, password='test')", "def step_impl_1(context, username, pwd):\n\n br = context.browser\n br.get(context.server_url + '/accounts/login/')\n\n user = br.find_element_by_id(\"username\")\n pswd = br.find_element_by_id(\"password\")\n\n user.send_keys(username)\n pswd.send_keys(pwd)\n br.find_element_by_id(\"submit\").click()", "def login(self, email, password):\r\n self.provide_info(email, password)\r\n self.submit()", "def login(self):\n url = self._root + self._routes[\"login\"]\n self.r = self.reqsession.get(url) \n if self.r.url == 'https://console.zerodha.com/dashboard':\n cookies = self.reqsession.cookies.get_dict('console.zerodha.com')\n self.console_session = cookies['session']\n self.public_token = self.reqsession.cookies['public_token']\n return True\n else:\n raise Exception(\"Login failed or Kite session expired\")", "def login(self, returnToURL):\r\n\t\tif not self.request().hasValue('userId') or \\\r\n\t\t\tnot self.request().hasValue('password'):\r\n\t\t\tself.response().sendRedirect('/nova/login.psp?returnToURL=%s' % returnToURL)\r\n\t\t\treturn 0\r\n\t\treturn 1", "def registrieren(self):\n self.c.login(self.username.text(), self.password.text(), \"0\")", "def _login(self):\n data = self._send(self.nc_request(action=\"login\", parameters={\"apipassword\": self._api_password}))\n\n self._session_id = data[\"apisessionid\"]\n\n logging.info(f\"logged in successfully with session id {self._session_id}\")", "def master_login():\n\n master_key = get_master_key().decode()\n login_master = input(\"\\nEnter your master password to begin using Password Manager: \")\n\n if login_master == master_key:\n\n print(\"Access granted!\\n\")\n access_granted = True\n\n return access_granted\n\n else:\n\n print(\"Uh oh, that is not your master password. 
Try again.\")\n return master_login()", "def login(self, username, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/local/login\"\n\t\tpayload = {\"username\": username, \"password\": password}\n\t\treturn(postUrl(url, self.credentials, payload))", "def login():\n token = request.form.get('idtoken')\n if verify_token(token):\n session['logged_in'] = True\n return '', 204\n else:\n return '', 401", "def callback():\n # state variable is not used yet but should be used to invalidate the\n # session on incorrect match per client.\n state = request.args.get('state')\n code = request.args.get('code')\n error = request.args.get('error')\n if error:\n return \"Error: \" + error\n headers = {\n 'accept': 'application/json',\n 'cache-control': 'no-cache',\n 'content-type': 'application/x-www-form-urlencoded'\n }\n data = {\n 'grant_type': 'authorization_code',\n 'client_id': CLIENT_ID,\n 'redirect_uri': REDIRECT_URI,\n 'code': code,\n 'code_verifier': CODE_VERIFIER\n }\n client_auth = requests.post(TOKEN_URL, headers=headers, data=data)\n client_json = client_auth.json()\n session['access_token'] = client_json[\"access_token\"]\n return redirect(url_for('.methods'))", "def login(self, **kwargs):\n\tusername = kwargs.get('username', self.username)\n\tif not username:\n\t raise RuntimeError, 'no username provided'\n\n\tpassword = kwargs.get('password', self.password)\n\tif not password:\n\t raise RuntimeError, 'no password provided'\n\tself.call('login', username=username, password=password)", "def userlogin(self, login, password):\n\n payload = {\n 'PASSWORD': password,\n 'path': \"http://www.gamefaqs.com/\",\n 'EMAILADDR': login,\n }\n\n login_url = 'http://www.gamefaqs.com/user/login'\n\n # Grab key ID\n resp = self.session.get(login_url)\n soup = bs(resp.text, 'html.parser')\n payload['key'] = soup.find('input', class_='hidden')['value']\n\n # Login with user payload\n resp = self.session.post(login_url, data=payload)\n\n soup = bs(resp.text, 'html.parser')\n\n if soup.find_all(string='There was an error while logging you in: '):\n raise Exception('Login Failed!')\n else:\n logging.debug('{} successfully logged in.'.format(self.login))", "def login(self):\n \n try:\n # Create socket connection and connect to the server\n self.sck = socket(AF_INET, SOCK_STREAM)\n self.sck.connect((self.host, self.port))\n self.sck.send(self.alchallenge_packet()) # Send Auth Logon Challenge\n srp_rcvd = self.decode_packet(self.sck.recv(1024)) # Read SRP for sending Logon Proof\n csrp = Srp(srp_rcvd['N'], srp_rcvd['g'], self.I, self.p, srp_rcvd['s'], srp_rcvd['B'])\n # Do some math...\n A = csrp.gen_A()\n u = csrp.gen_u()\n S = csrp.gen_S()\n K = csrp.gen_K()\n M = csrp.gen_M()\n # Let's send Auth Logon Proof\n self.sck.send(self.alproof_packet(M.blittle(), A.blittle()))\n return self.decode_packet(self.sck.recv(1024)).get('login') # 1 if no errors\n except ValueError:\n return 0", "def login(self):\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n token_url = urljoiner(self.baseurl, [\"connect/token\"])\n if(self.debug):\n print(token_url)\n new_auth = dict(self.auth_data)\n new_auth['password'] = \"XXXXX\"\n print(\"Authentication Data (without password):\")\n print(new_auth)\n r = requests.post(token_url, data=self.auth_data, headers=headers)\n # New environments do not redirect /rest/connect/token to\n # /auth/connect/token so lets check this case explicitly\n if(r.status_code > 400):\n new_token_url = self.baseurl.rstrip(\n \"/rest\") + \"/auth/connect/token\"\n if(self.debug):\n 
print(\"cannot connect to: \" + token_url)\n print(\"trying: \" + new_token_url)\n r = requests.post(\n new_token_url,\n data=self.auth_data,\n headers=headers)\n self.last_login = time.time()\n self.handle_error_message(r)\n self.auth_result = r.json()\n access_token = r.json().get('access_token')\n self.headers = {'Authorization': 'Bearer ' + access_token,\n 'Content-Type': 'application/json'}\n # Always relogin when time remaining on the current token is in between 1 min and 3 min\n self.refresh_window = min(max(60, 0.01 * self.auth_result['expires_in']), 180)", "def login(username, password, \n get_steamguard_code = get_steamguard_code_manual, \n solve_captcha = solve_captcha_manual,\n max_tries = 5):\n response_dict = auth.get_rsa_key(username)\n\n # The RSA information is encoded as hex strings.\n # Transform to integers.\n rsa_mod = int(response_dict['publickey_mod'], 16)\n pub_exp = int(response_dict['publickey_exp'], 16)\n\n encrypted_password = auth.get_encrypted_password(password, rsa_mod, pub_exp)\n timestamp = response_dict['timestamp']\n\n gid = ''\n text = ''\n email_auth = ''\n are_we_logged_in = False\n tries = 0\n while not are_we_logged_in and tries < max_tries:\n tries += 1\n response_dict = auth.do_login(username, encrypted_password, timestamp, email_auth, gid, text)\n if 'captcha_needed' in response_dict:\n text = solve_captcha(gid)\n elif 'emailauth_needed' in response_dict:\n email_auth = get_steamguard_code_manual()\n elif 'success' in response_dict and response_dict['success']:\n are_we_logged_in = True\n else:\n print response_dict\n raise Exception(\"I don't understand this state!\")\n\n if tries >= max_tries:\n raise Exception(\"Too many tries!\")\n\n return response_dict" ]
[ "0.7269786", "0.7269786", "0.70734197", "0.70426136", "0.6852028", "0.6803463", "0.67843354", "0.6739689", "0.6730675", "0.6720502", "0.6683724", "0.6669751", "0.66376764", "0.65688664", "0.65634507", "0.6525562", "0.651685", "0.651685", "0.649879", "0.64984405", "0.6491229", "0.64458877", "0.64118016", "0.64095795", "0.6394115", "0.6380953", "0.6379928", "0.6367268", "0.63606215", "0.6346407", "0.6345381", "0.6342872", "0.63404155", "0.6336025", "0.6334202", "0.6326229", "0.63205093", "0.6319115", "0.6312457", "0.6311932", "0.63072616", "0.6306049", "0.6296511", "0.6289371", "0.6282049", "0.62760425", "0.6274943", "0.62741256", "0.62700075", "0.6263721", "0.626188", "0.6257704", "0.6244173", "0.62358254", "0.6228825", "0.62197936", "0.62140876", "0.621091", "0.61955506", "0.61881375", "0.61851555", "0.6184508", "0.6176529", "0.6174516", "0.61707604", "0.6165689", "0.61651385", "0.61583585", "0.6150929", "0.6150226", "0.61473256", "0.6139042", "0.61357063", "0.6134327", "0.61292017", "0.61273146", "0.6126926", "0.6126703", "0.61242807", "0.61187756", "0.61114496", "0.6111142", "0.6107284", "0.6107003", "0.6106689", "0.61054415", "0.6098375", "0.609553", "0.60946375", "0.60812914", "0.60756177", "0.6069772", "0.6064025", "0.60483456", "0.6039952", "0.60351694", "0.6033701", "0.6031333", "0.60302496", "0.6029842" ]
0.74460435
0
View for all employees (in company) or for current user dependent on employee role
def all_employees(request, company_id=None):
    current_employee = Employee.objects.get(user__pk=request.user.pk)
    company_super_user = current_employee.isCompanySuperUserOrHigher()
    if company_id:
        company = Company.objects.get(pk=company_id)
    else:
        company = current_employee.company
    if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
        raise PermissionDenied()
    change_company_form = ChangeCompanyForm(initial=dict(company=company))
    return TemplateResponse(
        request,
        'all_employees.html',
        {
            'user': request.user,
            'company_super_user': company_super_user,
            'company': company,
            'change_company_form': change_company_form,
        }
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': 
employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )", "def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)", "def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )", "def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def get_queryset(self):\n\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # check if the user 
is a landville admin and return all records\n # even soft deleted ones\n return PropertyEnquiry.objects.all()\n\n if user.is_authenticated and user.role == 'CA':\n # if the user is a client admin, return only his records\n employer = user.employer.first()\n return PropertyEnquiry.active_objects.for_client(client=employer)\n\n # if the user is a buyer, return also only his enquiries\n return PropertyEnquiry.active_objects.for_user(user=user)", "def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)", "def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def employee():\n return Response(render_template('employee/employee.html'))", "def get_role_from_rolequeryset(self, role):\n role = super().get_role_from_rolequeryset(role=role)\n\n requesting_appname = self.request.cradmin_app.appname\n if requesting_appname in ['qualifiesforexam', 'overview_all_results']:\n if self.period_admin_access_semi_anonymous_assignments_restricted(period=role):\n raise Http404()\n\n return role", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n 
employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')", "def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False", "def get_employee_permissions(user_name: str, store_name: str, employee_name: str):\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.EMPLOYEE_PERMISSIONS, store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(\n employee_name) # TODO FOR NOW RETURN INFORMATION MAYBE TO CHANGE TO NEW FUNCTION", "def get_queryset(self):\n\n user = self.request.user\n\n if user.role == 'LA':\n return PropertyEnquiry.objects.all()\n\n # check if the user is a client admin\n # and return all enquiries made on his/her property\n if user.role == 'CA':\n return PropertyEnquiry.active_objects.for_client(\n client=user.employer.first())\n\n # else if the user is a buyer return only\n # the records that are associated with him/her\n return PropertyEnquiry.active_objects.for_user(user=user)", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def get_employees(self):\n return self.employees", "def test_ReportingPeriodDetailView_current_employee_set_false(self):\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 2\n )", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n return Property.active_objects.all_published()", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n return Property.active_objects.all_published()", "def show_work_role():\n\n work_roles = 
WorkRole.query.order_by(WorkRole.id.asc())\n return render_template('general/work_role.html', title='Work Roles', work_roles=work_roles)", "def myorgs(request):\n context = RequestContext(request)\n \n user = request.user\n orgs = user.orgusers.get_query_set()\n \n context['orgs'] = orgs\n return render_to_response('myorgs.html', context)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def admin_roles(request):\n user = User.objects.get(username=request.user.username)\n permisos = get_permisos_sistema(user)\n return render_to_response('admin/roles/roles.html',{'user':user,\n 'ver_roles':'Ver roles' in permisos,\n 'crear_rol': 'Crear rol' in permisos,\n 'mod_rol': 'Modificar rol' in permisos,\n 'eliminar_rol': 'Eliminar rol' in permisos},context_instance=RequestContext(request))", "def employeeHome(request):\n # assert isinstance(request, HttpRequest)\n \n if userHasBeenCleared(request):\n \n return render(\n request,\n 'app/index_employee.html',\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n 'email':getEmailSessionVar(request),\n 'isAdmin':isAdmin(request), \n }\n )\n return login(request)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def event_collaborator_detail(request, event_id, collaborator_id):\n if request.method == 'GET':\n event = get_object_or_404(Event, pk=event_id)\n collaborator = Employee.objects.all().filter(event=event, pk=collaborator_id)\n if collaborator:\n is_registered = True\n else:\n is_registered = False\n serializer = CollaboratorAttendanceSerializer(event, context={'is_registered': is_registered})\n return Response(serializer.data, status=status.HTTP_200_OK)", "def home(request):\n\n\tcontext_dict = {}\n\temployee = models.Teacher.objects.filter(\n\t\tuser=request.user\n\t).first()\n\t# context_dict = {\n\t# context_helper.get_emp_info(employee)\n\t# }\n\t# print (context_dict)\n\tcontext_dict.update(context_helper.get_emp_info(employee))\n\treturn render(request, \"home.html\", context_dict)", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Experience.objects.filter(username = username).order_by('-startdate')", "def show_employee(emp_id, fields=None):\n ret = {}\n if 
fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def is_employee():\n return _is_member('uw_employee')", "def getEmployees(self):\n return self.employees", "def get_queryset(self, request):\n qs = super(EventAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(dep=request.user.profile.department)", "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins", "def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n 
data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )", "def test_ReportingPeriodDetailView_current_employee_toggle(self):\n self.former_employee.user_data.current_employee = True\n self.former_employee.user_data.save()\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 3\n )\n self.former_employee", "def collection_get(request):\n\n # Our account parameter\n account = request.matchdict['id_account']\n\n # Our admin object\n admin = _get_admin(request)\n\n # Check if the account exists\n if account not in admin.list_accounts():\n request.response.status_int = 404\n return\n\n # Get the roles\n list_roles = admin.list_roles(account)\n\n # Return appropriately\n request.response.status_int = 200\n return {\n 'roles':\n list_roles\n }", "def filter_queryset(self, request, queryset, view):\n if view.action == \"retrieve\" and request.method == \"GET\":\n return queryset.model.objects.all()\n\n filtered_queryset = super().filter_queryset(request, queryset, view)\n org_users = set(\n [group.team.organization for group in request.user.groups.all()] +\n [o.user for o in filtered_queryset]\n )\n\n return queryset.model.objects.filter(user__in=org_users, user__is_active=True)", "def employee_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n user_id = get_jwt_identity()\n target_user = User.query.filter_by(id=user_id).first()\n\n if target_user is None:\n return redirect('admin/login.html', code=403)\n\n if target_user.role != RoleType.ADMINISTRATOR and target_user.role != RoleType.EMPLOYEE:\n return redirect('admin/login.html', code=403)\n return fn(*args, **kwargs)\n return wrapper", "def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses", "def do_employee_login():\n user_requested = request.form['email'].lower()\n password_requested = request.form['password']\n\n target_user = User.query.filter_by(mail=user_requested).first()\n if target_user is None:\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.check_password(password_requested):\n return Response(render_template('admin/login.html',\n message=\"Unknown Credentials\"))\n\n if not target_user.state == StateType.ACTIVE:\n return 
Response(render_template('admin/login.html',\n message=\"User account deactivated. Cannot login.\"))\n\n resp = Response(render_template('employee/employee.html', user=target_user.name,\n message=\"Login succeeded\"))\n set_access_cookies(resp, create_access_token(identity=target_user.id))\n return resp", "def employee_login():\n return Response(render_template('admin/login.html'))", "def index(request):\n users = User.objects.filter(is_staff=False, is_active=True).order_by('username')\n return render(request, 'users/view_all_users.html',\n { 'users': users })", "def get_employee_information(user_name: str, employee_name: str, store_name: str):\n\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name=user_name, action=Action.EMPLOYEE_INFO.value,\n store_name=store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(employee_name)", "def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if context is None:\n context = {}\n if 'emp_hours' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('emp.luggage_transfer.hours'),\n context.get('emp_hours'), [\"employee\"], context)\n args.append(('id', 'not in', [isinstance(d['employee'], tuple) and d['employee'][0] or d['employee'] for d in emp_ids]))\n if 'mission_line' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('mission_line'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n if 'illness' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.illness'),\n context.get('illness'), [\"employee_id\"], context)\n args.append(('id', 'not in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n\n if 'same' in context:\n emp_ids = resolve_o2m_operations(cr, uid, self.pool.get('hr.employee.mission.line'),\n context.get('same'), [\"employee_id\"], context)\n args.append(('id', 'in', [isinstance(d['employee_id'], tuple) and d['employee_id'][0] or d['employee_id'] for d in emp_ids]))\n \n \n if 'alternative_setting_id' in context:\n old_ids = super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context={}, limit=limit)\n\n alternative_setting_id = context.get('alternative_setting_id')\n setting_obj = self.pool.get('hr.alternative.setting')\n alternative_setting_id = setting_obj.browse(cr, uid, alternative_setting_id)\n degrees_ids = [\n x.id for x in alternative_setting_id.degrees_ids]\n degrees_ids += degrees_ids\n degrees_ids = tuple(degrees_ids)\n\n departments_ids = [\n x.id for x in alternative_setting_id.departments_ids]\n departments_ids += departments_ids\n departments_ids = tuple(departments_ids)\n\n ex_employees_ids = [\n x.id for x in alternative_setting_id.employees_ids]\n ex_employees_ids += ex_employees_ids\n ex_employees_ids = tuple(ex_employees_ids)\n\n\n old_ids_tuple = [x[0] for x in old_ids] + [x[0] for x in old_ids]\n old_ids_tuple = tuple(old_ids_tuple)\n\n accessed_ids = self.search(cr, uid, [])\n accessed_ids += accessed_ids\n accessed_ids = tuple(accessed_ids)\n\n if not old_ids_tuple:\n old_ids_tuple = (0,0)\n \n if not departments_ids:\n departments_ids = (0,0)\n cr.execute(\n ''' Select emp.id,(SELECT MAX(date) as max_date\n FROM hr_alternative_process_line\n WHERE 
employee_id=emp.id and state='confirmed')date\n from hr_employee emp\n where emp.degree_id in %s \n and emp.department_id not in %s \n and emp.state = 'approved' \n and emp.payroll_state = 'khartoum' \n and emp.id in %s \n and emp.gender='male' \n and emp.id in %s \n and emp.id not in %s \n order by date NULLS LAST''', (degrees_ids,departments_ids,old_ids_tuple,accessed_ids,ex_employees_ids))\n history = cr.dictfetchall()\n new_ids = []\n while True:\n try:\n new_ids.append( history.pop()['id'] )\n except:\n break\n\n temp = dict(old_ids)\n old_ids = [x for x in old_ids if x[0] in new_ids]\n #new_ids = [x for x in new_ids if x in accessed_ids]\n #print \"..........................temp\",new_ids\n #print \"......................\",[(x, temp.get(x,False) ) for x in new_ids]\n #print \"......................\",sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n return sorted(old_ids, key=lambda x :new_ids.index(x[0]))\n\n return super(hr_employee, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)", "def search_encomendas_admin(request, name):\n token = getToken(request)\n superUser = isSuperUser(token)\n\n if superUser == True:\n try:\n encomendas = Encomenda.objects.filter(produtos__titulo__contains=name).order_by(\"-id\")\n except Encomenda.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = EncomendaReadSerializer(encomendas, many=True)\n return Response(serializer.data)\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)", "def create_employee(request, company_id):\n\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n logUnauthorizedAccess(\"User tried to create_employee\", request)\n raise PermissionDenied()\n form = EmployeeForm(request, initial=dict(company=company))\n form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company=company) | Q(company__isnull=True))\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n\n return TemplateResponse(\n request,\n 'mus/create_employee_form.html',\n {\n 'employee_form': form,\n }\n )\n # data = {\n # 'employee_form': form.cleaned_data,\n # 'company': company.cleaned_data[\"name\"]\n # }\n # return JsonResponse(status=200, data=data)", "def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def test_access_employee(self):\n # Employee can't see any SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).read()\n # Employee can't edit the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).write({'team_id': self.company_data['default_sale_team'].id})\n # Employee can't create the SO\n with self.assertRaises(AccessError):\n self.env['sale.order'].with_user(self.company_data['default_user_employee']).create({\n 'partner_id': self.partner_a.id,\n })\n # 
Employee can't delete the SO\n with self.assertRaises(AccessError):\n self.order.with_user(self.company_data['default_user_employee']).unlink()", "def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def default_get(self, cr, uid, fields, context=None): \n \n \n res = super(granted_rights_order, self).default_get(cr, uid, fields, context=context)\n \n employee_obj = self.pool.get('hr.employee')\n department_obj = self.pool.get('hr.department')\n manager = False\n donor_emp_id = []\n \n if uid != 1 :\n\n donor_emp_id = employee_obj.search(cr ,uid, [('user_id' , '=' , uid )])\n deparment_id = employee_obj.browse(cr,uid,donor_emp_id[0]).department_id.id\n \n if donor_emp_id[0] == department_obj.browse(cr,uid,deparment_id).manager_id.id :\n manager = True\n \n \n \n \n \n \n \n \n \n if donor_emp_id :\n res.update({ 'employee_donor': donor_emp_id[0], \n 'department_id' : deparment_id,\n 'is_a_amanger' : manager,\n })\n return res", "def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')", "def get_queryset(self):\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # admins view all property, no filtering\n return Property.objects.all()\n\n if user.is_authenticated and user.employer.first():\n # if the user is a client_admin, they see all published property\n # and also their client's published and unpublished property.\n client = user.employer.first()\n return Property.active_objects.all_published_and_all_by_client(\n client=client)\n\n # other users only see published property\n return Property.active_objects.all_published()", "def _current_login_employee(self):\n hr_employee = self.env[\"hr.employee\"].search(\n [(\"user_id\", \"=\", self._current_login_user())], limit=1\n )\n return hr_employee.id", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Education.objects.filter(username = username) .order_by('-startdate')", "def get_employee(self, name):\n name = name.upper()\n if name in EMPLOYEE_MAP:\n name = EMPLOYEE_MAP[name]\n try:\n int(name)\n emps = Employee.objects.filter(id=name)\n except ValueError:\n if name == 'NN':\n emps = Employee.objects.filter(user__first_name='Nieznany')\n elif Employee.objects.filter(user__username__iexact=name).exists():\n emps = Employee.objects.filter(user__username__iexact=name)\n elif len(name) == 3:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:3],\n status=0)\n else:\n emps = Employee.objects.filter(user__first_name__istartswith=name[0],\n user__last_name__istartswith=name[1:],\n status=0)\n if not emps:\n emps = Employee.objects.filter(user__username__istartswith=name)\n if len(emps) == 1:\n return emps[0]\n elif len(emps) > 1:\n 
self.stdout.write(self.style.ERROR('Multiple employee matches for {}. Choices are:'\n .format(name)))\n for e in emps:\n self.stdout.write(self.style.ERROR(' -{}'.format(e.user.get_full_name())))\n else:\n raise CommandError('Employee {} does not exists! Fix your input file.'.format(name))\n\n return None", "def get_queryset(self):\n return AutomaticEmail.objects.filter(staff_user=self.request.user)", "def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)", "def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def users_with_role(self):\r\n entries = User.objects.filter(\r\n courseaccessrole__role=self._role_name,\r\n courseaccessrole__org=self.org,\r\n courseaccessrole__course_id=self.course_key\r\n )\r\n return entries", "def badges_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_bages = EmployeeBadge.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_bages, request)\n serializer = EmployeeBadgeSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)", "def query_employee(self, 
employee_inputs):\n\n query = \"select * from employee where \"\n row_names = [\n \"emp_ID\", \"Region_ID\", \"Emp_Lname\", \"Emp_Mi\", \"Emp_Fname\",\n \"Emp_Hiredate\"\n ]\n filled_attributes = []\n\n row_index = 0\n row_options = []\n for item in employee_inputs:\n if item is not None:\n row_options.append(row_index)\n filled_attributes.append(item)\n row_index += 1\n\n j = 0\n for i in row_options:\n if j == 0:\n query += \"{}='{}' \".format(row_names[i], filled_attributes[j])\n else:\n query += \"and {}='{}' \".format(row_names[i],\n filled_attributes[j])\n j += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def user_view(request):\n user = User.objects.all()\n return render(request, template_name, {'data_set':data_set})", "def list(self , request,*args,**kwargs):\n return super(UsersViewset,self).list(request,args,kwargs)", "def employee_profile_page(cls, employee_id):\n return cls.__profile_page(employee_id, cls._logger)", "def get(self, request):\r\n\r\n if not request.user.is_staff:\r\n raise Http404\r\n data = []\r\n\r\n for course in self.get_courses(): # pylint: disable=unused-variable\r\n datum = [course.display_name, course.id]\r\n datum += [CourseEnrollment.objects.filter(\r\n course_id=course.id).count()]\r\n datum += [CourseStaffRole(course.id).users_with_role().count()]\r\n datum += [','.join([x.username for x in CourseInstructorRole(\r\n course.id).users_with_role()])]\r\n data.append(datum)\r\n\r\n datatable = dict(header=[_('Course Name'), _('course_id'),\r\n _('# enrolled'), _('# staff'),\r\n _('instructors')],\r\n title=_('Enrollment information for all courses'),\r\n data=data)\r\n context = {\r\n 'datatable': datatable,\r\n 'msg': self.msg,\r\n 'djangopid': os.getpid(),\r\n 'modeflag': {'staffing': 'active-section'},\r\n 'edx_platform_version': getattr(settings, 'EDX_PLATFORM_VERSION_STRING', ''),\r\n }\r\n return render_to_response(self.template_name, context)", "def showORGusers(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n ORG_ID = kwargs['ORG_ID']\n strCSPProdURL = kwargs['strCSPProdURL']\n jsonResponse = get_csp_users_json(strCSPProdURL, ORG_ID, sessiontoken)\n if jsonResponse == None:\n print(\"API Error\")\n sys.exit(1)\n\n users = jsonResponse['results']\n table = PrettyTable(['First Name', 'Last Name', 'User Name'])\n for i in users:\n table.add_row([i['user']['firstName'],i['user']['lastName'],i['user']['username']])\n print (table.get_string(sortby=\"Last Name\"))", "def get_companies(request):\n companies = Company.objects.all()\n context={'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def display_certs(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n certs = employee_certification.query.filter_by(employee_id = employee_id).all()\n \n all_certs = Cert.query.all()\n \n return render_template(\"users/display_cert.html\", employee = employee, certs = certs, all_certs = all_certs)", "def get_queryset(self):\r\n name = self.kwargs['name']\r\n course_id_string = self.request.QUERY_PARAMS.get('course_id')\r\n if not course_id_string:\r\n raise ParseError('course_id must be specified')\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)\r\n role = Role.objects.get_or_create(course_id=course_id, name=name)[0]\r\n users = 
role.users.all()\r\n return users", "def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs)\n user = self.request.user\n if not user.is_authenticated:\n return qs.none()\n if self.is_get and not user.has_perm(\"users.view_user\"):\n return qs.filter(pk=user.pk)\n if self.is_update and not user.has_perm(\"users.change_user\"):\n return qs.filter(pk=user.pk)\n return qs", "def get_queryset(self):\n user = self.request.user\n collabLists = ListObject.objects.filter(collaborators__id=user.id)\n return collabLists", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)", "def get_list_filter(self, request):\n if request.user.is_superuser:\n return self.list_filter\n return self.list_filter_companies", "def list(self, request, *args, **kwargs):\n return super(UserViewSet, self).list(request, *args, **kwargs)", "def getELUsers(**kwargs):\n \n for key in kwargs:\n if type(kwargs[key]) == list:\n kwargs[key] = kwargs[key][0]\n \n allELUsers = ELUser.ELUser.all(**kwargs)\n allELUsersDictionaries = [dict(eluser) for eluser in allELUsers if dict(eluser)]\n \n return flask.Response(\n response = json.dumps(allELUsersDictionaries),\n status = 200,\n content_type = 'application/json'\n )", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies" ]
[ "0.75012994", "0.676221", "0.675863", "0.66391176", "0.659517", "0.65067995", "0.63182765", "0.6227569", "0.62267536", "0.6176886", "0.61206603", "0.6090563", "0.6077315", "0.60686785", "0.6056699", "0.59416133", "0.5920941", "0.5915269", "0.5865308", "0.5852588", "0.5848536", "0.5825259", "0.5797603", "0.5784566", "0.5777118", "0.5758369", "0.5735553", "0.56972456", "0.56829065", "0.56693107", "0.5635448", "0.56236106", "0.56236106", "0.5613692", "0.56021875", "0.55998486", "0.5575284", "0.55604655", "0.5557157", "0.55234855", "0.55090505", "0.55052924", "0.5501459", "0.54979044", "0.5463077", "0.54528123", "0.54521775", "0.54039305", "0.5401544", "0.53922445", "0.5389022", "0.53780764", "0.5377212", "0.5371639", "0.5368984", "0.53680575", "0.5365055", "0.5349817", "0.53454113", "0.53447485", "0.53430444", "0.53365314", "0.5319792", "0.5317902", "0.5302403", "0.5300769", "0.52874345", "0.5241847", "0.52348393", "0.52252674", "0.5214821", "0.51963586", "0.5192847", "0.519159", "0.5189371", "0.5187232", "0.5176226", "0.5162283", "0.5159732", "0.51566255", "0.51546204", "0.514097", "0.5127809", "0.51253223", "0.5124406", "0.5120045", "0.51059484", "0.5098111", "0.5094945", "0.50798523", "0.5076737", "0.50758624", "0.5072332", "0.50717163", "0.50656736", "0.5062178", "0.5060405", "0.5060405", "0.5060405", "0.5060405" ]
0.7280209
1
Get all employees as json
def employees_json(request):
    # current_employee = Employee.objects.get(user__pk=request.user.pk)
    employee_list = Employee.objects.filter(manager=request.user.employee_user)
    employees = list()
    for employee in employee_list:
        manager_dict = model_to_dict(employee)
        manager_dict['first_name'] = employee.user.first_name
        manager_dict['last_name'] = employee.user.last_name
        employees.append(manager_dict)
    data = {"employees": employees}
    return JsonResponse(data=data, content_type='application/json', safe=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def getEmployees(self):\n return self.employees", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def get_employees(self):\n return self.employees", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get_employees_directory(self):\n response = requests.get(self._base_url + \"employees/directory\",\n auth=(self._api_key, \"pass\"),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n emps_json = json.loads(response.text)['employees']\n return {int(e['id']): Employee(e['displayName'],\n e['firstName'],\n e['lastName'],\n e['nickname']) for e in emps_json}", "def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret", "def 
get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)", "def employees(self) -> object:\n return self._employees", "def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees", "def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)", "def get_all_data():\n return jsonify(service.get_all_data())", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def get_companies():\n all_companies = 
storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def getELUsers(**kwargs):\n \n for key in kwargs:\n if type(kwargs[key]) == list:\n kwargs[key] = kwargs[key][0]\n \n allELUsers = ELUser.ELUser.all(**kwargs)\n allELUsersDictionaries = [dict(eluser) for eluser in allELUsers if dict(eluser)]\n \n return flask.Response(\n response = json.dumps(allELUsersDictionaries),\n status = 200,\n content_type = 'application/json'\n )", "def jsonify_all(cls):\n return jsonify(accounts=[account.as_dict() for account in cls.query.all()])", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def userJSON():\n user = session.query(User).all()\n result = []\n\n for i in user:\n result += [i.serialize]\n\n return jsonify(Users=result)", "def get_users():\n return jsonify([\n users.to_dict()\n for users in models.storage.all('User').values()\n ])", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list", "def get_amenities():\n list_amenities = []\n for amenity in storage.all('Amenity').values():\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n 
\"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return ret", "def data():\n empo = get(empo_url).json()\n dict_new = {}\n for u in empo:\n empo_id = str(u.get(\"id\"))\n tasks = get(tasks_url + \"?userId=\" + empo_id).json()\n task_list = []\n for i in tasks:\n if i[\"userId\"] != empo_id:\n task_list.append({\"username\": u[\"username\"],\n \"task\": i[\"title\"],\n \"completed\": i[\"completed\"]})\n dict_new[empo_id] = task_list\n with open(\"todo_all_employees.json\", \"w\") as file:\n dump(dict_new, file)", "def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)", "def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)", "def all_amenities():\n amenities_list = []\n for amenity in storage.all(Amenity).values():\n amenities_list.append(amenity.to_dict())\n return jsonify(amenities_list)", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def return_amenities():\n amenities = list(storage.all(Amenity).values())\n amenity_list = []\n for amenity in amenities:\n amenity_list.append(amenity.to_dict())\n return jsonify(amenity_list)", "def get_users():\n users = User.query # no need to order\n users_data = [user.to_dict() for user in users.all()]\n return jsonify(users=users_data)", "def get_all_user():\n user = UserModel.objects()\n return jsonify(user), 200", "def get_articles():\n _, articles = base_query(db_session)\n return jsonify([p.serialize for p in articles])", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def search_and_export_json():\n\n url = 'https://jsonplaceholder.typicode.com/'\n\n users_dict = requests.get(\"{}users\".format(url)).json()\n tasks_dict = requests.get(\"{}todos\".format(url)).json()\n\n my_dict = {}\n usernames_dict = {}\n\n for info in users_dict:\n user_id = info.get('id')\n my_dict[user_id] = []\n usernames_dict[user_id] = info.get('username')\n\n for info in tasks_dict:\n tasks_all_emp = {}\n user_id = info.get('userId')\n tasks_all_emp[\"task\"] = info.get('title')\n tasks_all_emp[\"completed\"] = info.get('completed')\n 
tasks_all_emp[\"username\"] = usernames_dict.get(user_id)\n my_dict.get(user_id).append(tasks_all_emp)\n\n file_name = \"todo_all_employees.json\"\n with open(file_name, mode='w') as json_file:\n json.dump(my_dict, json_file)", "def get_amenities():\n amenities = []\n for amenity in storage.all(Amenity).values():\n amenities.append(amenity.to_dict())\n return jsonify(amenities)", "def get(self, request):\n employee = EmployeeDetail.objects.all()\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n many=True\n ).data\n }\n return Response(response)", "def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]", "def return_expenses():\r\n g.db.execute(\"SELECT * FROM monthly_data ORDER BY Sr\")\r\n rows = g.db.fetchall()\r\n data = []\r\n for x in rows:\r\n data.append({'sr':x[0],'name':x[1], 'id':x[2], 'item':x[3], 'price':x[5], 'date':x[4]})\r\n return jsonify(data)", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def amenity_ret():\n ame_list = []\n all_objs = storage.all(\"Amenity\")\n for obj in all_objs.values():\n ame_list.append(obj.to_dict())\n return jsonify(ame_list)", "def get_users():\n selection = []\n try:\n selection = [{'id':usr.id, 'username':usr.username, 'email':usr.email} \n for usr in User.query.all()]\n except:\n selection = {'error':True}\n return json.dumps(selection)", "def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())", "def user_ret():\n user_list = []\n all_objs = storage.all(\"User\")\n for obj in all_objs.values():\n user_list.append(obj.to_dict())\n return jsonify(user_list)", "def all_Users():\n new_dict = []\n for usr in storage.all('User').values():\n new_dict.append(usr.to_dict())\n return jsonify(new_dict)", "def get_amenities():\n amenities_dict_list = [amenity.to_dict() for amenity in\n storage.all(\"Amenity\").values()]\n return jsonify(amenities_dict_list)", "def sportsJSON():\n\n sports = session.query(Sport).all()\n return jsonify(sports=[r.serialize for r in sports])", "def restaurants_all() -> str:\n restaurant_objects = restaurants.load_restaurants()\n return jsonify(restaurant_objects)", "def all_items_handler():\n items = getAllItems()\n return jsonify(items=[i.serialize for i in items])", "def departments(department_name=None):\n\tif not department_name:\n\t\tdepartment_data = _serialize_list(Department.query.all(), backrefs=[\"employees\"])\n\t\tdepartment_data = {'departments': department_data, 'total': len(department_data)}\n\telse:\n\t\tdepartment_data = _serialize_model(Department.query.filter_by(name=department_name).first(), backrefs=[\"employees\"])\n\n\treturn jsonify(department_data)", "def get_all_users():\n users = []\n for mv in storage.all(\"User\").values():\n users.append(mv.to_dict())\n return jsonify(users)", "def return_artistnames(): \n\n names = [] #list for artist names\n rows = db.session.query(Artist.name).all()\n for row in rows: \n names.append(row[0])\n\n return jsonify(names)", "def get_users():\n users = User.query.all()\n users_schema = UserSchema()\n result = users_schema.dump(users, many=True)\n return jsonify({'users': result.data})", "def get_employees(cls, strategy=lazyload):\n cls._check_strategy(strategy)\n\n return db.session.query(Employee).options(\n strategy(Employee.department)\n ).all()", "def 
list_all_amenities():\n data = storage.all('Amenity')\n amenities = [v.to_dict() for k, v in data.items()]\n return jsonify(amenities)", "def jugadores():\n\tjugadores = Jugador.query.order_by(Jugador.id.desc()).filter_by(activo=True)\n\treturn jsonify([jugador.to_dict()\n\t\t for jugador in jugadores])", "def get_all_by_name():\n name = request.args['name']\n return jsonify(service.get_all_data_by_name(name))", "def all_sales(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM sales;\")\n res = cur.fetchall()\n sales_records=[]\n for a_sale in res:\n record = {\n 'sales_id':a_sale[0],\n 'attendant':a_sale[1],\n 'product_name':a_sale[2],\n 'price':a_sale[3],\n 'quantity':a_sale[4]\n }\n sales_records.append(record)\n return jsonify({\"Records\": sales_records}), 200", "def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()", "def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty", "def allCategoriesJSON():\n categories = db_session.query(Category).all()\n return jsonify(categories=[c.serialize for c in categories])", "def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )", "def json_get_all_names_and_emails(request):\n\n # Check out HVZ/main/models.py for helper functions relating to Players.\n # Player.current_players() returns all Players in the current Game.\n emails = [(str(p.user.first_name) + \" \" + str(p.user.last_name), p.mailbox) for p in Player.current_players()]\n\n # json.dumps creates a string from a Python object. 
You can then\n # read the string and convert it into an Objective-C data\n # structure using NSJSONSerialization in Objective-C.\n json_data = json.dumps(emails)\n\n return HttpResponse(\n json_data,\n content_type=\"application/json\"\n )", "def get_stores():\n stores = Store.query # no need to order\n stores_data = [store.to_dict() for store in stores.all()]\n return jsonify(stores=stores_data)", "def read_all():\n # Create the list of users from our data\n users = User.query.order_by(User.first_name).all()\n\n # Serialize the data for the response\n user_schema = UserSchema(many=True)\n data = user_schema.dump(users)\n return data", "def list_users():\n return jsonify(user=\"joe\")", "def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def get_mentee_list():\n # Get db object and users table\n db = get_db()\n users = db.users\n \n # Search database for mentees\n cursor = users.find({\"role\": \"Mentee\"})\n \n context = {'mentees': []}\n \n for document in cursor:\n temp = document\n del temp['_id']\n context['mentees'].append(temp)\n \n context['url'] = \"/api/v1/mentees/\"\n return flask.jsonify(**context)", "def get(self):\n\n users = UserModel.get_top_earners()\n users_json = [user.json() for user in users]\n return {\"users\": users_json}", "def get_exercises():\n email = session.get(\"email\")\n tag_arg = request.args.get(\"tag\")\n exercises = fm.get_all_exercises(email, tag_arg)\n msg = \"Found {} exercises for {}\".format(len(exercises), email)\n app.logger.info(msg)\n return jsonify(dict(exercises=exercises))", "def list():\n trucks = Foodtruck.query.all()\n return jsonify(foodtrucks=[truck.to_dict() for truck in trucks])", "def get_users():\n users = storage.all('User')\n users_list = []\n for user in users.values():\n users_list.append(user.to_dict())\n return jsonify(users_list), 200", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id': u.id, 'admin': u.admin})\n return {'users': usersJSON}", "def all_entries(cls):\n info = Diary.entries\n response = jsonify({\"data\": info})\n response.status_code = 200\n return response", "def employers_get(label=None, page=None, per_page=None): # noqa: E501\n\n\n return query_manager.get_resource(\n label=label,\n page=page,\n per_page=per_page,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)", "def get_countries():\n countries = Country.query # no need to order\n countries_data = [country.to_dict() for country in countries.all()]\n return jsonify(countries=countries_data)", "def getall():\n elements = Advertisements().get_all_elements()\n data = jsonify(elements)\n data.statut_code = 200\n return data", "def amenities_all():\n return jsonify(list(map(lambda x: x.to_dict(),\n list(storage.all(Amenity).values()))))", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def happy():\n # Query all venues\n results = session.query(VP.name, VP.latitude, VP.longitude).all()\n \n # Create a dictionary from the row data and append to a list of all_venue\n all_venues = []\n for name, lat, lon in results:\n venue_dict = {}\n 
venue_dict[\"name\"] = name\n venue_dict[\"latitude\"] = lat\n venue_dict[\"longitude\"] = lon\n all_venues.append(venue_dict)\n \n return jsonify(all_venues)", "def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()", "def get(cls):\n return {'products': [product.to_json() for product in ProductModel.find_all()]}", "def get(self):\n users = User.query.all()\n usersJSON = []\n for u in users:\n usersJSON.append({'id':u.id, 'admin':u.admin})\n return { 'users' : usersJSON }", "def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def get_books():\n return jsonify({'books': Book.get_all_books()})", "def get_books():\n return jsonify({'books': Book.get_all_books()})", "def retrieve(self, request, pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def get(self):\n all_patients = model_patient.query.all()\n return jsonify(all_patients)" ]
[ "0.80177975", "0.79526293", "0.78115326", "0.72639924", "0.7209743", "0.71376187", "0.71241915", "0.7055127", "0.7054203", "0.7002898", "0.68735474", "0.6857624", "0.68054706", "0.67973894", "0.6706132", "0.66616935", "0.6618934", "0.6581776", "0.6578094", "0.65394276", "0.64805084", "0.64098907", "0.6351272", "0.6322406", "0.6278039", "0.6247345", "0.62471193", "0.6222043", "0.6201518", "0.6161673", "0.61550957", "0.6143976", "0.61379087", "0.6111975", "0.6108984", "0.6103555", "0.61003774", "0.60972583", "0.6095555", "0.60948074", "0.6083758", "0.60582894", "0.60456747", "0.6030361", "0.60233027", "0.6018328", "0.6002323", "0.59965116", "0.5982986", "0.59603274", "0.5948698", "0.592496", "0.59212166", "0.5877531", "0.5869281", "0.5860559", "0.5858288", "0.5849215", "0.58439463", "0.5843509", "0.5842587", "0.5833205", "0.58226055", "0.5817468", "0.5799135", "0.5798358", "0.5779523", "0.57713354", "0.5763276", "0.57588625", "0.57579094", "0.5754775", "0.5749727", "0.5748318", "0.5737075", "0.57322073", "0.57300824", "0.5725105", "0.5708375", "0.5696335", "0.5691881", "0.56893724", "0.56828105", "0.56800884", "0.56727463", "0.5669546", "0.56485367", "0.5644668", "0.56350523", "0.5633134", "0.562746", "0.562347", "0.5621738", "0.5618645", "0.5605881", "0.5605512", "0.55904907", "0.55904907", "0.5587724", "0.5585772" ]
0.760211
3